code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right cell of a
    binary grid (0 = free, 1 = blocked), moving one cell up, down, left or
    right and never revisiting a cell on the current path."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
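# Illustrative check (editor's addition, not part of the original row): in a
# 3x3 grid whose centre cell is blocked, every simple path must hug the
# border, one clockwise and one counter-clockwise, so:
#     depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())  # -> 2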
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
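# Illustrative usage (editor's sketch; the vocab path below is hypothetical):
#   tokenizer = CamembertTokenizer("sentencepiece.bpe.model")
#   ids = tokenizer("J'aime le camembert !")["input_ids"]
#   tokenizer.decode(ids)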
| 668 | 0 |
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Return the rank of `matrix` using Gaussian elimination (the matrix is
    modified in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
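# Illustrative check (editor's addition, not part of the original row): the
# second row of [[1.0, 2.0], [2.0, 4.0]] is twice the first, so:
#     rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])  # -> 1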
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize

_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 1 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
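# Illustrative invocation (editor's sketch; the script name and input paths
# below are hypothetical placeholders):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base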
| 507 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str) -> None:
    """Write the README.md model card for one allenai wmt16 checkpoint."""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 469 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
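# Illustrative usage (editor's sketch): with the defaults above,
#   config = EfficientNetConfig()
#   config.num_hidden_layers  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64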
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
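# Illustrative behaviour (editor's sketch): constructing the shim warns and
# then defers entirely to the parent class:
#   BeitFeatureExtractor()  # emits FutureWarning, behaves like BeitImageProcessor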
| 667 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 90 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 90 | 1 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the
    generated key as a bit string of length `key_len`."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
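# Editor's note on sizing: each basis bit is uniform, so Alice's and Bob's
# bases agree on a given qubit with probability 1/2, and 6 * key_len prepared
# qubits yield about 3 * key_len sifted bits on average, comfortably above
# key_len; the ljust/truncate step above covers the rare shortfall.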
if __name__ == "__main__":
print(f"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
| 320 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
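# Illustrative usage (editor's sketch; "data.txt" is a hypothetical file, and
# this reader is the internal engine behind `datasets.Dataset.from_text`):
#   ds = TextDatasetReader("data.txt").read()
#   ds[0]  # -> {"text": "..."}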
| 320 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
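# The _LazyModule proxy installed above keeps `import transformers.models.roberta` cheap:
# the heavy torch/TF/flax imports in the table only run when a symbol is first accessed, e.g.
#     from transformers.models import roberta   # fast, no framework import yet
#     roberta.RobertaModel                       # first access triggers the torch import path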
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    '''Collect (row_id, row_dict) pairs in the order the given partitions are visited.'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_b = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_b.n_shards == 2
    expected_row_ids_and_row_dicts_b = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_b ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_b[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
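# A minimal sketch (assumes a local pyspark install; the helper name below is illustrative,
# not part of the original tests) showing the "partition_row" ids the tests above expect:
def _demo_partition_row_ids():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(6 ).repartition(2 )
    for part_id in range(2 ):
        for row_idx, row in enumerate(df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() ):
            print(f'''{part_id}_{row_idx}''' , row.asDict() )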
| 24 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ctrl'''] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_ctrl'''] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
    """simple docstring"""
    # ds_b is digitsum(b); c is the value of the lowest k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Adds addend to the digit array in `digits`, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
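# Note on the split above: timm stores Q, K and V as one fused (3 * hidden_size, hidden_size)
# projection; rows [0:hidden_size] are the query, [hidden_size:2*hidden_size] the key, and the
# last hidden_size rows the value, which is exactly how the three slices are carved out.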
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """simple docstring"""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("tiny" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small" ):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base" ):
        pass
    elif deit_name[4:].startswith("large" ):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
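    # A typical invocation (sketch; the script filename and output path are illustrative):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224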
| 339 |
from math import factorial
def solution(n: int = 20) -> int:
    """Count the lattice paths through an n x n grid: the central binomial
    coefficient C(2n, n) (Project Euler problem 15)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)) )
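# An equivalent closed form (sketch; `solution_comb` is illustrative and needs Python >= 3.8):
# math.comb stays in exact integer arithmetic, avoiding the float division above.
from math import comb
def solution_comb(n: int = 20) -> int:
    return comb(2 * n, n)  # for a 2x2 grid this gives comb(4, 2) == 6 routes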
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
| 339 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates a set of `DataLoader`s for the `glue` dataset for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args ):
"""simple docstring"""
UpperCamelCase__ = []
# Download the dataset
UpperCamelCase__ = load_dataset("""glue""" , """mrpc""" )
# Create our splits
UpperCamelCase__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
UpperCamelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ = config["""lr"""]
UpperCamelCase__ = int(config["""num_epochs"""] )
UpperCamelCase__ = int(config["""seed"""] )
UpperCamelCase__ = int(config["""batch_size"""] )
UpperCamelCase__ = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
UpperCamelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCamelCase__ = batch_size // MAX_GPU_BATCH_SIZE
UpperCamelCase__ = MAX_GPU_BATCH_SIZE
set_seed(a__ )
# New Code #
# Create our folds:
UpperCamelCase__ = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
UpperCamelCase__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(a__ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = get_fold_dataloaders(
a__ , a__ , a__ , a__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=a__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase__ = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase__ = AdamW(params=model.parameters() , lr=a__ )
# Instantiate scheduler
UpperCamelCase__ = get_linear_schedule_with_warmup(
optimizer=a__ , num_warmup_steps=100 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# Now we train the model
for epoch in range(a__ ):
model.train()
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase__ = model(**a__ )
UpperCamelCase__ = outputs.loss
UpperCamelCase__ = loss / gradient_accumulation_steps
accelerator.backward(a__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ = model(**a__ )
UpperCamelCase__ = outputs.logits.argmax(dim=-1 )
UpperCamelCase__ , UpperCamelCase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=a__ , references=a__ , )
UpperCamelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , a__ )
# New Code #
# We also run predictions on the test set at the very end
UpperCamelCase__ = []
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ = model(**a__ )
UpperCamelCase__ = outputs.logits
UpperCamelCase__ , UpperCamelCase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(a__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
UpperCamelCase__ = torch.cat(a__ , dim=0 )
UpperCamelCase__ = torch.stack(a__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
UpperCamelCase__ = metric.compute(predictions=a__ , references=a__ )
accelerator.print("""Average test metrics from all folds:""" , a__ )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose """
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 """
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=int , default=3 , help="""The number of splits to perform across the dataset""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
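# A typical launch (sketch; the script filename and fold count are illustrative):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16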
| 548 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self ):
UpperCamelCase__ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
UpperCamelCase__ = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = text_generator.model.config.eos_token_id
UpperCamelCase__ = """<pad>"""
UpperCamelCase__ = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , )
self.assertEqual(
__lowerCAmelCase , [
[
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
],
[
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
],
] , )
@require_tf
    def test_small_model_tf(self ):
UpperCamelCase__ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
UpperCamelCase__ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline(self , model , tokenizer , processor ):
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self ):
UpperCamelCase__ = """Hello I believe in"""
UpperCamelCase__ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = text_generator(__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
UpperCamelCase__ = text_generator(__lowerCAmelCase , stop_sequence=""" fe""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
    def run_pipeline_test(self , text_generator , _ ):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
UpperCamelCase__ = text_generator("""This is a test""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCamelCase__ = text_generator("""This is a test""" , return_full_text=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCamelCase__ = pipeline(task="""text-generation""" , model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , return_full_text=__lowerCAmelCase )
UpperCamelCase__ = text_generator("""This is a test""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCamelCase__ = text_generator("""This is a test""" , return_full_text=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCamelCase__ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase__ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
] , )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_full_text=__lowerCAmelCase , return_text=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_full_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase__ = text_generator("""""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase__ = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCamelCase__ = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
UpperCamelCase__ = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__lowerCAmelCase ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self ):
import torch
# Classic `model_kwargs`
UpperCamelCase__ = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase__ = pipe("""This is a test""" )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase__ = pipe("""This is a test""" )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCamelCase__ = pipe("""This is a test""" )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self ):
import torch
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self ):
import torch
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__lowerCAmelCase , top_p=0.5 )
    def test_pipeline_length_setting_warning(self ):
UpperCamelCase__ = """Hello world"""
UpperCamelCase__ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
UpperCamelCase__ = logging.get_logger("""transformers.generation.tf_utils""" )
else:
UpperCamelCase__ = logging.get_logger("""transformers.generation.utils""" )
UpperCamelCase__ = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowerCAmelCase ) as cl:
UpperCamelCase__ = text_generator(__lowerCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__lowerCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__lowerCAmelCase ) as cl:
UpperCamelCase__ = text_generator(__lowerCAmelCase , max_new_tokens=1 )
self.assertNotIn(__lowerCAmelCase , cl.out )
with CaptureLogger(__lowerCAmelCase ) as cl:
UpperCamelCase__ = text_generator(__lowerCAmelCase , max_length=10 )
self.assertNotIn(__lowerCAmelCase , cl.out )
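# A minimal usage sketch mirroring the deterministic test above (same tiny test checkpoint):
#   generator = pipeline("text-generation", model="sshleifer/tiny-ctrl", framework="pt")
#   generator("This is a test", do_sample=False)  # greedy decoding, reproducible output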
| 548 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_altclip'''] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
    def get_tokenizer(self ,**kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts(self ,tokenizer ):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case(self ):
'''simple docstring'''
pass
    def test_add_special_tokens(self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] ,add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) ,1 )
                decoded = tokenizer.decode(encoded_special_token ,skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency(self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text ,add_special_tokens=False )
                self.assertListEqual(ids ,ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) ,0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a ,str )
                self.assertEqual(text_a.replace(" " ,"" ) ,output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
    def test_maximum_encoding_length_pair_input(self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
    def test_pretokenized_inputs(self ):
        '''simple docstring'''
        pass
| 46 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
a_ : List[str] = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
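    # DFS from any vertex of this connected graph explores all seven vertices;
    # a second start vertex is shown purely as an illustration (not in the original).
    print(depth_first_search(G, 'F'))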
    print(depth_first_search(G, 'A'))
| 702 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'Pushing model and image processor for {model_name} to hub' )
        model.push_to_hub(f'microsoft/{model_name}' )
        image_processor.push_to_hub(f'microsoft/{model_name}' )
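# A typical invocation (sketch; the script filename and local paths are illustrative):
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim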
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 678 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Tuple = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680 |
"""simple docstring"""
def solution(n: int = 200_0000) -> int:
    """Sum of all primes below n, via a sieve of Eratosthenes (Project Euler problem 10)."""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
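# A quick sanity check (sketch; `_check_solution` is illustrative and not in the original file):
# primes below 10 are 2, 3, 5 and 7, so the sieve must return 17 for n = 10.
def _check_solution() -> None:
    assert solution(10) == 2 + 3 + 5 + 7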
if __name__ == "__main__":
print(f'{solution() = }')
| 680 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer(self , **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs)
    def get_rust_tokenizer(self , **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs)
    def get_image_processor(self , **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''')
        input_processor = processor(images=image_input , return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str , return_tensors='''np''')
        encoded_tok = tokenizer(input_str , return_tensors='''np''')
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
    def test_processor_with_text_list(self):
        model_name = '''google/owlvit-base-patch32'''
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ['''cat''', '''nasa badge''']
        inputs = processor(text=input_texts)
        seq_length = 1_6
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
    def test_processor_with_nested_text_list(self):
        model_name = '''google/owlvit-base-patch32'''
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [['''cat''', '''nasa badge'''], ['''person''']]
        inputs = processor(text=input_texts)
        seq_length = 1_6
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
    def test_processor_case(self):
        model_name = '''google/owlvit-base-patch32'''
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ['''cat''', '''nasa badge''']
        inputs = processor(text=input_texts)
        seq_length = 1_6
        input_ids = inputs['''input_ids''']
        predicted_ids = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input)
self.assertListEqual(list(inputs.keys()) , ['''query_pixel_values''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
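# Editor's sketch (not additional test code): the save/load round-trip pattern the
# tests above exercise, in plain form. A composite processor serializes both its
# tokenizer and image-processor configs into one directory and can be rebuilt from
# it; the checkpoint name is the real one used in the tests, the directory is a
# placeholder.
def _roundtrip_demo(save_dir: str) -> None:
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    processor.save_pretrained(save_dir)  # writes tokenizer files + preprocessor_config.json
    reloaded = OwlViTProcessor.from_pretrained(save_dir)
    assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()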
| 675 |
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000x1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and the columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives in a sorted grid with one binary search per row."""
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0] )


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, stopping at the first negative found in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations against the large grid."""
    from timeit import timeit

    print('''Running benchmarks''' )
    setup = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
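# Editor's addition (not from the original script): an O(rows + cols) "staircase"
# alternative to the per-row binary search above, included for comparison. Walking
# from the bottom row up, the first-negative column index can only move right,
# thanks to the same row/column ordering that validate_grid checks.
def count_negatives_staircase(grid: list[list[int]]) -> int:
    total = 0
    col = 0
    for row in reversed(grid):
        while col < len(row) and row[col] >= 0:
            col += 1
        total += len(row) - col
    return total


assert count_negatives_staircase([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8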
| 675 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class snake_case__ :
@staticmethod
        def open( *args , **kwargs ) -> List[str]:
"""simple docstring"""
pass
def hashimage(image: Image ) -> str:
    """Return a stable md5 fingerprint of the image's raw bytes."""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ) -> Union[str, Any]:
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self , depth_estimator , examples ) -> Union[str, Any]:
"""simple docstring"""
        outputs = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
        outputs = depth_estimator(
            [
                Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
                # RGBA
                dataset[0]["""file"""],
                # LA
                dataset[1]["""file"""],
                # L
                dataset[2]["""file"""],
            ] )
        self.assertEqual(
            [
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
    def test_small_model_tf(self ) -> List[str]:
"""simple docstring"""
pass
@slow
@require_torch
    def test_large_model_pt(self ) -> Optional[Any]:
"""simple docstring"""
a_ : List[str] = """Intel/dpt-large"""
a_ : Any = pipeline("""depth-estimation""" , model=UpperCamelCase_ )
a_ : str = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
a_ : List[str] = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self ) -> List[Any]:
"""simple docstring"""
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
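# Editor's usage sketch (not additional test code): the minimal invocation these
# tests wrap. The checkpoint and image URL are the ones from the slow test above;
# the output file name is a placeholder.
def _depth_pipeline_demo() -> None:
    estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    out = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    out["depth"].save("depth.png")  # normalized PIL image, for viewing
    print(out["predicted_depth"].shape)  # raw torch.Tensor of predicted depths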
| 419 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
SCREAMING_SNAKE_CASE : Tuple = "</w>"
SCREAMING_SNAKE_CASE : int = "@@ "
def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
SCREAMING_SNAKE_CASE : int = {"facebook/s2t-wav2vec2-large-en-de": 10_24}
class snake_case__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="<pad>" , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_=False , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , **UpperCamelCase_ , )
a_ : str = do_lower_case
with open(UpperCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
a_ : Any = json.load(UpperCamelCase_ )
a_ : List[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
a_ : str = None
a_ : Tuple = None
else:
with open(UpperCamelCase_ , encoding="""utf-8""" ) as merges_handle:
a_ : Any = merges_handle.read().split("""\n""" )[:-1]
a_ : Optional[int] = [tuple(merge.split()[:2] ) for merge in merges]
a_ : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
a_ : Union[str, Any] = {}
    @property
    def vocab_size(self ) -> int:
        """simple docstring"""
        return len(self.decoder )
    def get_vocab(self ) -> Dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ) -> str:
        """Apply greedy byte-pair merges to a single token, lowest merge rank first."""
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = """\n""" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , """""" )
        word = word.replace(""" """ , BPE_TOKEN_SPLIT )
        self.cache[token] = word
        return word
    def _tokenize(self , text ) -> List[str]:
        """simple docstring"""
        if self.bpe_ranks is None:
            raise ValueError(
                """This tokenizer was instantiated without a `merges.txt` file, so"""
                """ that it can only be used for decoding, not for encoding."""
                """Make sure to provide `merges.txt` file at instantiation to enable """
                """encoding.""" )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens
    def _convert_token_to_id(self , token ) -> int:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ) -> str:
        """simple docstring"""
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string(self , tokens ) -> str:
        """simple docstring"""
        string = """ """.join(tokens )
        # make sure @@ tokens are concatenated
        string = """""".join(string.split(BPE_TOKEN_SPLIT ) )
        return string
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , """w""" , encoding="""utf-8""" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return (vocab_file, merges_file)
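# Editor's illustration (not part of the original tokenizer): the greedy BPE merge
# loop above, reduced to a toy rank table and kept independent of the class. The
# helper repeatedly merges the lowest-ranked adjacent pair, exactly like `bpe()`.
def toy_bpe(word: tuple, ranks: dict) -> tuple:
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(word[i] + word[i + 1])
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word


assert toy_bpe(("l", "o", "w"), {("l", "o"): 0, ("lo", "w"): 1}) == ("low",)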
| 419 | 1 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
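# Editor's usage sketch for the helpers above; the environment variable names are
# made up for illustration.
if __name__ == "__main__":
    os.environ["DEMO_WORKERS"] = "4"
    os.environ["DEMO_DEBUG"] = "true"
    print(get_int_from_env(["DEMO_WORKERS", "WORLD_SIZE"], default=1))  # -> 4
    print(parse_flag_from_env("DEMO_DEBUG"))                            # -> True
    print(parse_choice_from_env("DEMO_MODE"))                           # -> "no" (default)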
| 613 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    task_type: Optional[str] = field(
        default="""NER""" ,metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast: bool = field(default=False ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)


@dataclass
class DataTrainingArguments:
    """simple docstring"""

    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    labels: Optional[str] = field(
        default=None ,metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} ,)
    max_seq_length: int = field(
        default=128 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __UpperCAmelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(p ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
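# Editor's aside (not part of the original script): a minimal, self-contained
# example of the seqeval metrics that compute_metrics() above reports, run on toy
# BIO tag sequences instead of real model output.
def _seqeval_demo() -> None:
    from seqeval.metrics import f1_score, precision_score, recall_score

    y_true = [["B-PER", "I-PER", "O"], ["B-LOC", "O"]]
    y_pred = [["B-PER", "I-PER", "O"], ["O", "O"]]
    print(precision_score(y_true, y_pred))  # 1.0: the one predicted entity is correct
    print(recall_score(y_true, y_pred))     # 0.5: one of two gold entities was found
    print(f1_score(y_true, y_pred))         # ~0.667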
| 613 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __UpperCamelCase ( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
    base_cmd = ["""accelerate""", """launch"""]
    config_folder = Path.home() / """.cache/huggingface/accelerate"""
    config_file = """default_config.yaml"""
    config_path = config_folder / config_file
    changed_path = config_folder / """_default_config.yaml"""
    test_config_path = Path("""tests/test_configs""" )
@classmethod
def UpperCamelCase( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def UpperCamelCase( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def UpperCamelCase( self ):
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def UpperCamelCase( self ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['''--config_file''', str(config ), self.test_file_path] , env=os.environ.copy() )
def UpperCamelCase( self ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
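# Editor's sketch (not part of the original tests): the launcher invocation the
# class above drives through `execute_subprocess_async`, written as a plain
# subprocess call. The script path is a placeholder.
import subprocess

def _launch_demo(script: str = "train.py") -> None:
    subprocess.run(["accelerate", "launch", script], check=True)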
class __UpperCamelCase ( unittest.TestCase ):
__A : Dict = """test-tpu"""
__A : Optional[Any] = """us-central1-a"""
__A : int = """ls"""
__A : Tuple = ["""accelerate""", """tpu-config"""]
__A : Union[str, Any] = """cd /usr/share"""
__A : Optional[Any] = """tests/test_samples/test_command_file.sh"""
__A : Any = """Running gcloud compute tpus tpu-vm ssh"""
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_UpperCamelCase )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_UpperCamelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , )
def UpperCamelCase( self ):
_UpperCAmelCase = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_UpperCamelCase , )
self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , )
 | 32 |
'''simple docstring'''
__UpperCAmelCase = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DModel,
        UNet2DConditionModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
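# Editor's note on the pattern used throughout this __init__: each optional
# backend is probed once, and on failure the public names are replaced by dummy
# stand-ins that raise a helpful ImportError only when they are actually used.
# Schematically (`is_backend_available`, `dummy_backend_objects` and `RealClass`
# are placeholders, not real module names):
#
#     try:
#         if not is_backend_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_backend_objects import *  # noqa F403
#     else:
#         from .real_module import RealClass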
| 379 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
UpperCamelCase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _a ( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self, A=None, A=None, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A=None, A=None, A=None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else mask_token
super().__init__(
vocab_file=A, tokenizer_file=A, bos_token=A, eos_token=A, sep_token=A, cls_token=A, unk_token=A, pad_token=A, mask_token=A, src_lang=A, tgt_lang=A, additional_special_tokens=A, **A, )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
SCREAMING_SNAKE_CASE : Tuple = {
lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE : Tuple = src_lang if src_lang is not None else 'en_XX'
SCREAMING_SNAKE_CASE : int = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
def UpperCamelCase_ ( self, A, A = "en_XX", A = None, A = "ro_RO", **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = src_lang
SCREAMING_SNAKE_CASE : str = tgt_lang
return super().prepare_seqaseq_batch(A, A, **A )
    def _switch_to_input_mode(self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def set_tgt_lang_special_tokens(self, lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def save_vocabulary(self, save_directory, filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
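# Editor's usage sketch (not part of the original file): the class above
# corresponds to transformers' public MBartTokenizerFast. The checkpoint name
# matches the pretrained map at the top of this file; the sentence is a
# placeholder.
def _mbart_lang_code_demo() -> None:
    from transformers import MBartTokenizerFast

    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    ids = tok("UN Chief Says There Is No Plan to Stop War")["input_ids"]
    # For MBart the suffix is `</s>` followed by the source language code.
    print(tok.convert_ids_to_tokens(ids[-2:]))  # ['</s>', 'en_XX']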
| 713 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key , file ):
    """Map a Megatron-DeepSpeed parameter name to its transformers equivalent."""
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' ,file )[1] )
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype ):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' ,str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ,__UpperCamelCase: List[str] ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
if bloom_config_file == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = BloomConfig()
else:
SCREAMING_SNAKE_CASE : List[Any] = BloomConfig.from_json_file(__UpperCamelCase )
if shard_model:
SCREAMING_SNAKE_CASE : int = os.listdir(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = sorted(filter(lambda __UpperCamelCase : s.startswith('layer' ) and "model_00" in s ,__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = {'weight_map': {}, 'metadata': {}}
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : int = BloomConfig()
for j, file in enumerate(__UpperCamelCase ):
print('Processing file: {}'.format(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[Any] = None
for i in range(__UpperCamelCase ):
# load all TP files
SCREAMING_SNAKE_CASE : Optional[Any] = file.replace('model_00' ,f"model_0{i}" )
SCREAMING_SNAKE_CASE : Any = torch.load(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,map_location='cpu' )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE : Dict = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE : int = temp.pop(__UpperCamelCase )
if tensors is None:
SCREAMING_SNAKE_CASE : Optional[Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(__UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE : List[str] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([tensors[key], temp[key]] ,dim=__UpperCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE : int = tensors[key] / pretraining_tp
torch.save(
__UpperCamelCase ,os.path.join(
__UpperCamelCase ,'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) ,str(len(__UpperCamelCase ) ).zfill(5 ) ) ,) ,)
for key in tensors.keys():
SCREAMING_SNAKE_CASE : str = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
SCREAMING_SNAKE_CASE : Dict = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) ,str(len(__UpperCamelCase ) ).zfill(5 ) )
SCREAMING_SNAKE_CASE : Any = BloomConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
SCREAMING_SNAKE_CASE : int = total_size
with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__UpperCamelCase ,WEIGHTS_NAME + '.index.json' ) ,'w' ,encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.dumps(__UpperCamelCase ,indent=2 ,sort_keys=__UpperCamelCase ) + '\n'
f.write(__UpperCamelCase )
else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s ,file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' ,f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path ,f_name ) ,map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key ,file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] ,dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors ,strict=False )
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path ,exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() ,pytorch_weights_dump_path )
        print(f"Save configuration file to {pytorch_config_dump_path}" )
        with open(pytorch_config_dump_path ,'w' ,encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
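# Example invocation (script name and paths are illustrative, not taken from this file):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path /path/to/output --shard_model --pretraining_tp 4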
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
UpperCamelCase_ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 508 | 0 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
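# For example, a line like `MODEL_MAPPING_NAMES = OrderedDict(` matches
# _re_intro_mapping, and an entry like `        ("albert", "AlbertModel"),`
# yields the identifier "albert" via _re_identifier.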
def sort_auto_mapping( fname , overwrite : bool = False ):
    with open(fname, '''r''', encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'''^(\s*)\S''', lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname, '''w''', encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite : bool = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames, diffs ) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'''
            ''' this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 467 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
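# Placeholder classes that stand in for the real speech objects when the
# `speech` backend (e.g. torchaudio) is not installed; any use raises a
# helpful error via `requires_backends`.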
class UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
| 467 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
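    # Single sequences are wrapped as `<s> X </s>`; pairs become
    # `<s> A </s></s> B </s>`, as the method below shows.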
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 106 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
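    # The four fairseq special ids (<s>, <pad>, </s>, <unk>) shadow the first
    # sentencepiece ids, and <mask> is pinned to the last sentencepiece id, so
    # conversion methods below check these mappings before the spm vocabulary.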
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token : str ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index : int ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 106 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    '''simple docstring'''
    def __init__( self , value ):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum :
    '''simple docstring'''
    def __init__( self , tree ):
        self.tree = tree
    def depth_first_search( self , node ):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ):
        yield self.depth_first_search(self.tree )
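# Example (hypothetical tree): a root of 10 with children 5 and -3 sums to 12:
#   tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
#   print(next(iter(BinaryTreeNodeSum(tree))))  # 12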
if __name__ == "__main__":
import doctest
doctest.testmod() | 630 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = """layoutlmv3"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.12""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ] )
        else:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels'}),
                ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-5
    @property
    def default_onnx_opset( self ) -> int:
        return 12
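    # The method below builds a dummy batch of (image, text, bounding boxes)
    # with OCR disabled so the ONNX exporter can trace the model with fixed shapes.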
    def generate_dummy_inputs( self , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        setattr(processor.image_processor , 'apply_ocr' , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
return inputs | 630 | 1 |
def solution():
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
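    # `constant` now holds the first million-plus digits of Champernowne's
    # constant 0.123456789101112...; the return below multiplies the digits at
    # positions 1, 10, 100, ..., 1_000_000 (Project Euler problem 40).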
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 716 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence : str ):
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
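# e.g. capitalize("hello world") returns "Hello world"; a non-letter first
# character is left unchanged because of the `.get` default.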
if __name__ == "__main__":
from doctest import testmod
testmod()
| 235 | 0 |
def max_product_subarray(numbers : list[int] ) -> int:
    """simple docstring"""
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now , max_till_now = max_till_now , min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
return max_prod
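# Example: max_product_subarray([2, 3, -2, 4]) == 6 (the subarray [2, 3]).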
| 258 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
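# Thin CLI wrapper around TensorFlowBenchmark: parse the benchmark arguments
# and translate deprecated `--no_*` flags into a readable error message.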
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 258 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.label_schema = label_schema
return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 710 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product : str = "laptop" )-> DataFrame:
    """simple docstring"""
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
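    # NOTE: an explicit parser could be passed here, e.g.
    # BeautifulSoup(..., "html.parser"), to silence bs4's default-parser warning.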
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
                product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
                product_rating = """Not available"""
try:
                product_mrp = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
                product_mrp = """"""
try:
                discount = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
                discount = float("""nan""" )
except AttributeError:
pass
            data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
            data_frame.loc[
                data_frame["""Current Price of the product"""] > data_frame["""MRP of the product"""],
                """MRP of the product""",
            ] = """ """
            data_frame.loc[
                data_frame["""Current Price of the product"""] > data_frame["""MRP of the product"""],
                """Current Price of the product""",
            ] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = '''headphones'''
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
| 321 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value : int ) -> None:
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    def __init__( self , tree : Node ) -> None:
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self , node : Node | None ) -> int:
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        """simple docstring"""
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir ,src_lang ,tgt_lang ,model_name):
    texts ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True ,exist_ok=True)
    path =os.path.join(model_card_dir ,'''README.md''')
    print(f"Generating {path}")
    with open(path ,'''w''' ,encoding='''utf-8''') as f:
        f.write(_SCREAMING_SNAKE_CASE)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 | 1 |
"""simple docstring"""
from __future__ import annotations
class Matrix :
    """simple docstring"""
    def __init__( self ,rows : list[list[int]] ):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value ,(int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self ,row : int ,column : int ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self ,row : int ,column : int ):
        if (row + column) % 2 == 0:
            return self.get_minor(row ,column )
        return -1 * self.get_minor(row ,column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row ,column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ):
return str(self.rows )
def __str__( self : List[str] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
    def add_row( self ,row : list[int] ,position : int | None = None ):
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row ,list ):
            raise type_error
        for value in row:
            if not isinstance(value ,(int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self ,column : list[int] ,position : int | None = None ):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column ,list ):
            raise type_error
        for value in column:
            if not isinstance(value ,(int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self ,other : object ):
        if not isinstance(other ,Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self ,other : object ):
        return not self == other
    def __neg__( self ):
        return self * -1
    def __add__( self ,other : Matrix ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self ,other : Matrix ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self ,other : Matrix | int | float ):
        if isinstance(other ,(int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other ,Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row ,column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self ,other : int ):
        if not isinstance(other ,int ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls ,row : list[int] ,column : list[int] ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
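# Example (hypothetical): Matrix([[1, 2], [3, 4]]).determinant() == -2, and
# multiplying a matrix by its identity() leaves it unchanged.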
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
    def __init__( self ,prefix_length : int ,prefix_inner_dim : int ,prefix_hidden_dim : Optional[int] = None ,vocab_size : int = 50_257 ,n_positions : int = 1_024 ,n_embd : int = 768 ,n_layer : int = 12 ,n_head : int = 12 ,n_inner : Optional[int] = None ,activation_function : str = "gelu_new" ,resid_pdrop : float = 0.1 ,embd_pdrop : float = 0.1 ,attn_pdrop : float = 0.1 ,layer_norm_epsilon : float = 1e-5 ,initializer_range : float = 0.0_2 ,scale_attn_weights : bool = True ,use_cache : bool = True ,scale_attn_by_inverse_layer_idx : bool = False ,reorder_and_upcast_attn : bool = False ,):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                f''' `n_embd`: {n_embd} are not equal.''' )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim ,n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size ,n_positions=n_positions ,n_embd=n_embd ,n_layer=n_layer ,n_head=n_head ,n_inner=n_inner ,activation_function=activation_function ,resid_pdrop=resid_pdrop ,embd_pdrop=embd_pdrop ,attn_pdrop=attn_pdrop ,layer_norm_epsilon=layer_norm_epsilon ,initializer_range=initializer_range ,scale_attn_weights=scale_attn_weights ,use_cache=use_cache ,scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx ,reorder_and_upcast_attn=reorder_and_upcast_attn ,)
        self.transformer = GPT2LMHeadModel(gpt_config )
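        # When prefix_hidden_dim is set, prefix embeddings pass through a linear
        # bottleneck (encode_prefix) and are projected back up to n_embd
        # (decode_prefix) before being prepended to the token embeddings.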
    def forward( self ,input_ids : torch.Tensor ,prefix_embeds : torch.Tensor ,attention_mask : Optional[torch.Tensor] = None ,labels : Optional[torch.Tensor] = None ,):
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
            labels = torch.cat((dummy_token, input_ids) ,dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat ,labels=labels ,attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self ,batch_size : int ,device : torch.device ) -> torch.Tensor:
        return torch.zeros(batch_size ,self.prefix_length ,dtype=torch.int64 ,device=device )
    def encode( self ,prefix ):
        return self.encode_prefix(prefix )
@torch.no_grad()
    def generate_captions( self ,features ,eos_token_id ,device ):
        features = torch.split(features ,1 ,dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature ,device=device ,eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self ,input_ids=None ,input_embeds=None ,device=None ,beam_size : int = 5 ,entry_length : int = 67 ,temperature : float = 1.0 ,eos_token_id : Optional[int] = None ,):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size ,device=device ,dtype=torch.int )
        is_stopped = torch.zeros(beam_size ,device=device ,dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size ,-1 )
                generated = generated.expand(beam_size ,*generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size ,*tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) ,dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size ,-1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) ,dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
            generated = torch.cat((generated, next_token_embed) ,dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts ,dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 632 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 4 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
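# A datasets builder that materializes a Spark DataFrame into Arrow/Parquet
# shards, writing one shard stream per Spark task via `mapInArrow`.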
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples( df : "pyspark.sql.DataFrame" , partition_order : List[int] , ):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
    def __init__( self , df : "pyspark.sql.DataFrame" , partition_order=None , ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator : np.random.Generator ) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id : int , num_workers : int ) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
@property
    def n_shards( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df : "pyspark.sql.DataFrame" , cache_dir : str = None , working_dir : str = None , **config_kwargs , ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
        '''simple docstring'''
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , 'a' )
            return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        '''simple docstring'''
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ):
        '''simple docstring'''
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                    table = pa.Table.from_batches([batch] )
                    writer.write_table(table )
if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _prepare_split( self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size : Optional[Union[str, int]] = None , num_proc : Optional[int] = None , **kwargs , ):
'''simple docstring'''
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(max_shard_size )
is_local = not is_remote_filesystem(self._fs )
path_join = os.path.join if is_local else posixpath.join
SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
fpath = path_join(self._output_dir , fname )
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
(num_examples , num_bytes , num_shards , shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id : int , shard_id : int , global_shard_id : int , ):
rename(
fs , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards ) ):
task_id , num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda x : _rename_shard(*x ) ).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SUFFIX , '' ) , )
def _get_examples_iterable_for_split( self , split_generator : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
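# --- Added illustrative sketch (not part of the original module) -------------
# The pattern above has each Spark task emit one stats RecordBatch per shard,
# which the driver then aggregates per task. A minimal, self-contained version
# of that aggregation (assumes pyspark>=3.3 for `mapInArrow` and an active
# SparkSession named `spark`; all other names here are hypothetical):
#
#   import pyarrow as pa
#   import pyspark
#
#   def batch_stats(batches):
#       task_id = pyspark.TaskContext().taskAttemptId()
#       yield pa.RecordBatch.from_arrays(
#           [[task_id], [sum(b.num_rows for b in batches)]],
#           names=["task_id", "num_rows"],
#       )
#
#   stats = (
#       spark.range(1_000)
#       .mapInArrow(batch_stats, "task_id: long, num_rows: long")
#       .groupBy("task_id")
#       .agg(pyspark.sql.functions.sum("num_rows").alias("total_rows"))
#       .collect()
#   )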
| 47 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case( unittest.TestCase ):
@slow
def test_output_embeds_base_model( self ):
'''simple docstring'''
model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
features = {
'''input_ids''': tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.int32 ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
output = model(features )['''last_hidden_state''']
expected_shape = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
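# Added note: @slow-marked tests are skipped by default in the transformers
# test suite; they are typically enabled via the RUN_SLOW environment variable
# (the test file path below is hypothetical):
#
#   RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py -s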
| 713 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
'''simple docstring'''
config = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
config.max_length = 128
elif "12-12" in model_name:
config.time_stride = 12
config.frequency_stride = 12
elif "14-14" in model_name:
config.time_stride = 14
config.frequency_stride = 14
elif "16-16" in model_name:
config.time_stride = 16
config.frequency_stride = 16
else:
raise ValueError('''Model not supported''' )
repo_id = '''huggingface/label-files'''
if "speech-commands" in model_name:
config.num_labels = 35
filename = '''speech-commands-v2-id2label.json'''
else:
config.num_labels = 527
filename = '''audioset-id2label.json'''
idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
config.id2label = idalabel
config.label2id = {v: k for k, v in idalabel.items()}
return config
def rename_key( name ):
'''simple docstring'''
if "module.v" in name:
name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
name = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
name = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
name = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
name = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
name = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
name = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def convert_state_dict( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split('''.''' )
layer_num = int(key_split[3] )
dim = config.hidden_size
prefix = f'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.'''
if "weight" in key:
orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
else:
orig_state_dict[prefix + '''query.bias'''] = val[:dim]
orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
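# --- Added illustrative sketch (not part of the original script) -------------
# The qkv branch above splits timm's fused attention projection into the
# separate query/key/value tensors that HF models expect. In isolation
# (hypothetical sizes):
#
#   import torch
#   dim = 768
#   fused = torch.randn(3 * dim, dim)   # rows are [q; k; v] stacked
#   q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v]), fused)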
def remove_keys( state_dict ):
'''simple docstring'''
ignore_keys = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
config = get_audio_spectrogram_transformer_config(model_name )
model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
checkpoint_url = model_name_to_url[model_name]
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
# remove some keys
remove_keys(state_dict )
# rename some keys
new_state_dict = convert_state_dict(state_dict , config )
# load 🤗 model
model = ASTForAudioClassification(config )
model.eval()
model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
mean = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
std = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
max_length = 1_024 if '''speech-commands''' not in model_name else 128
feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
if "speech-commands" in model_name:
dataset = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
waveform = dataset[0]['''audio''']['''array''']
else:
filepath = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
waveform , _ = torchaudio.load(filepath )
waveform = waveform.squeeze().numpy()
inputs = feature_extractor(waveform , sampling_rate=16_000 , return_tensors='''pt''' )
# forward pass
outputs = model(**inputs )
logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
expected_slice = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
expected_slice = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
expected_slice = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
expected_slice = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
expected_slice = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
expected_slice = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
expected_slice = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
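# Added usage sketch (script name and paths are hypothetical):
#
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub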
| 168 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source : float , target : float ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
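# Added example: is_apercent_close(1_998, 2_000) is True (0.1% relative difference < 1%).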
@pytest.mark.integration
def test_test_command(dataset_dir : str ):
'''simple docstring'''
args = _TestCommandArgs(dataset=dataset_dir , all_configs=True , save_infos=True )
test_command = TestCommand(*args )
test_command.run()
readme_path = os.path.join(dataset_dir , "README.md" )
assert os.path.exists(readme_path )
dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result , expected = getattr(dataset_infos["default"] , key ), getattr(expected_dataset_infos["default"] , key )
if key == "num_bytes":
assert is_apercent_close(result , expected )
elif key == "splits":
assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 18 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')
class _A ( Generic[_T] ):
def __init__( self : int , lowerCamelCase__ : Iterable[_T] | None = None ):
"""simple docstring"""
self._stacka : list[_T] = list(lowerCamelCase__ or [] )
self._stackb : list[_T] = []
def __len__( self : str ):
"""simple docstring"""
return len(self._stacka ) + len(self._stackb )
def __repr__( self : Dict ):
"""simple docstring"""
return f'Queue({tuple(self._stackb[::-1] + self._stacka )})'
def put( self : Union[str, Any] , lowerCamelCase__ : _T ):
"""simple docstring"""
self._stacka.append(lowerCamelCase__ )
def get( self : Union[str, Any] ):
"""simple docstring"""
stacka_pop = self._stacka.pop
stackb_append = self._stackb.append
if not self._stackb:
while self._stacka:
stackb_append(stacka_pop() )
if not self._stackb:
raise IndexError("""Queue is empty""" )
return self._stackb.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
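# Added usage sketch: FIFO behaviour despite the two LIFO stacks; each element
# is moved between stacks at most once, so operations are amortized O(1).
#
#   queue = _A([10, 20])
#   queue.put(30)
#   assert queue.get() == 10   # first in, first out
#   assert len(queue) == 2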
| 269 | 0 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Tuple = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : Tuple = concatenate_datasets
A_ : List[str] = DownloadConfig
A_ : int = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : Optional[int] = DownloadConfig
A_ : List[Any] = DownloadMode
A_ : Any = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
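# Added usage sketch: the re-exports above form the package's public API, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train[:1%]")
#   print(ds.features)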
| 616 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A_ : Tuple = logging.getLogger(__name__)
A_ : Tuple = "Hello world! cécé herlolip"
A_ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints( bertabs_checkpoint_path , pytorch_dump_folder_path ):
'''simple docstring'''
config = BertAbsConfig(
temp_dir=""".""" , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
checkpoints = torch.load(bertabs_checkpoint_path , lambda storage , loc : storage )
original = AbsSummarizer(config , torch.device("""cpu""" ) , checkpoints )
original.eval()
new_model = BertAbsSummarizer(config , torch.device("""cpu""" ) )
new_model.eval()
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
encoder_input_ids = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(encoder_input_ids )) )
encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
decoder_input_ids = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(decoder_input_ids )) )
decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
src = encoder_input_ids
tgt = decoder_input_ids
segs = token_type_ids = None
clss = None
mask_src = encoder_attention_mask = None
mask_tgt = decoder_attention_mask = None
mask_cls = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
output_original_generator = original.generator(output_original_model )
output_converted_model = new_model(
encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
output_converted_generator = new_model.generator(output_converted_model )
max_absolute_diff = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference between model outputs: {:.2f}""".format(max_absolute_diff ) )
max_absolute_diff = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference between generator outputs: {:.2f}""".format(max_absolute_diff ) )
are_identical = torch.allclose(output_converted_generator , output_original_generator , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
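# Added usage sketch (paths are hypothetical):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-converted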
| 616 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type , is_finetuned ):
"""simple docstring"""
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
attribute = '''lm_head'''
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
"""simple docstring"""
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split('.' )[-2]
mapped_key = mapped_key.replace('*' , layer_index )
if "weight_g" in name:
weight_type = '''weight_g'''
elif "weight_v" in name:
weight_type = '''weight_v'''
elif "bias" in name:
weight_type = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = '''weight'''
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
"""simple docstring"""
name = full_name.split('conv_layers.' )[-1]
items = name.split('.' )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
"""simple docstring"""
if config_path is not None:
config = UniSpeechConfig.from_pretrained(config_path )
else:
config = UniSpeechConfig()
if is_finetuned:
if dict_path:
target_dict = Dictionary.load_from_json(dict_path )
# important: change bos & pad token ids, since the CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.eos_token_id = target_dict.bos_index
config.pad_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols )
vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
if not os.path.isdir(pytorch_dump_folder_path ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
return
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict['<pad>'] = 42
vocab_dict['<s>'] = 43
with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(vocab_dict , vocab_handle )
tokenizer = Wav2Vec2PhonemeCTCTokenizer(
vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
processor.save_pretrained(pytorch_dump_folder_path )
hf_unispeech = UniSpeechForCTC(config )
else:
hf_unispeech = UniSpeechForPreTraining(config )
if is_finetuned:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
model = model[0].eval()
recursively_load_weights(model , hf_unispeech , is_finetuned )
hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
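# Added usage sketch (paths are hypothetical; pass --not_finetuned to convert
# a pretraining-only checkpoint instead of a fine-tuned CTC one):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted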
| 474 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Optional[int] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _snake_case ( PretrainedConfig ):
    model_type = 'unispeech'
def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim )
self.conv_stride = list(conv_stride )
self.conv_kernel = list(conv_kernel )
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim )
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.num_ctc_classes = num_ctc_classes
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.feat_quantizer_dropout = feat_quantizer_dropout
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# pretraining loss
self.replace_prob = replace_prob
@property
def inputs_to_logits_ratio( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
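# Added usage sketch: with the default conv strides (5, 2, 2, 2, 2, 2, 2) the
# property above multiplies out to 5 * 2**6 = 320 input samples per output
# frame, e.g.:
#
#   config = _snake_case()
#   assert config.inputs_to_logits_ratio == 320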
| 445 | 0 |
import math
def prime_sieve(n : int) -> list:
'''simple docstring'''
is_prime = [True] * n
is_prime[0] = False
is_prime[1] = False
is_prime[2] = True
for i in range(3 , int(n**0.5 + 1) , 2):
index = i * 2
while index < n:
is_prime[index] = False
index = index + i
primes = [2]
for i in range(3 , n , 2):
if is_prime[i]:
primes.append(i)
return primes
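# Added example: prime_sieve returns every prime strictly below n, e.g.
# prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].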
def solution(limit : int = 999_966_663_333) -> int:
'''simple docstring'''
primes_upper_bound = math.floor(math.sqrt(limit)) + 100
primes = prime_sieve(primes_upper_bound)
matches_sum = 0
prime_index = 0
last_prime = primes[prime_index]
while (last_prime**2) <= limit:
next_prime = primes[prime_index + 1]
lower_bound = last_prime**2
upper_bound = next_prime**2
# Get numbers divisible by lps(current)
current = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
current = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
current = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
last_prime = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution()) | 716 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path : str , bert_config_file : str , pytorch_dump_path : str) -> None:
'''simple docstring'''
config = BertConfig.from_json_file(bert_config_file)
print(F'Building PyTorch model from configuration: {config}')
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model , config , tf_checkpoint_path)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 94 | 0 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception ):
    pass
class Node:
    def __init__( self : Optional[int] , data : Any ):
        self.data = data
        self.next_node : Node | None = None
    def __iter__( self : Dict ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self : Tuple ):
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False
    root_node = Node(1)
    print(root_node.has_loop)  # False
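# Added alternative sketch (not in the original file): Floyd's tortoise-and-hare
# cycle detection performs the same check in O(1) extra space, versus the O(n)
# `visited` list used by `__iter__` above:
#
#   def has_loop_floyd(head: Node | None) -> bool:
#       slow = fast = head
#       while fast is not None and fast.next_node is not None:
#           slow = slow.next_node                # one step
#           fast = fast.next_node.next_node      # two steps
#           if slow is fast:
#               return True
#       return False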
| 17 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 579 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception ):
    pass
class Node:
    def __init__( self : Union[str, Any] , data : Any ):
        """simple docstring"""
        self.data = data
        self.next_node : Node | None = None
    def __iter__( self : Tuple ):
        """simple docstring"""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self : str ):
        """simple docstring"""
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False
    root_node = Node(1)
    print(root_node.has_loop)  # False
| 464 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self : int ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self : List[Any] ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def create_and_check_model( self : List[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = BioGptModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_biogpt_for_causal_lm( self : Union[str, Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
"""simple docstring"""
model = BioGptForCausalLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_biogpt_model_attention_mask_past( self : str , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
"""simple docstring"""
model = BioGptModel(config=config )
model.to(torch_device )
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output , past_key_values = model(input_ids , attention_mask=attn_mask ).to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 , )
# get two different outputs
output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['''last_hidden_state''']
output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask )['''last_hidden_state''']
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
def create_and_check_biogpt_model_past_large_inputs( self : Any , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
"""simple docstring"""
model = BioGptModel(config=config ).to(torch_device ).eval()
attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
output , past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
output_from_no_past = model(next_input_ids , attention_mask=attention_mask )['''last_hidden_state''']
output_from_past = model(next_tokens , attention_mask=attention_mask , past_key_values=past_key_values )[
'''last_hidden_state'''
]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
def create_and_check_forward_and_backwards( self : int , config , input_ids , input_mask , head_mask , token_type_ids , *args , gradient_checkpointing=False ):
"""simple docstring"""
model = BioGptForCausalLM(config )
model.to(torch_device )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def create_and_check_biogpt_weight_initialization( self : Any , config , *args ):
"""simple docstring"""
model = BioGptModel(config )
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def create_and_check_biogpt_for_token_classification( self : str , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
"""simple docstring"""
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def prepare_config_and_inputs_for_common( self : Optional[int] ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config ,
input_ids ,
token_type_ids ,
input_mask ,
sequence_labels ,
token_labels ,
choice_labels ,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
def setUp( self : List[Any] ):
"""simple docstring"""
self.model_tester = BioGptModelTester(self )
self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
def test_config( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_various_embeddings( self : Any ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def test_biogpt_model_attention_mask_past( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
def test_biogpt_gradient_checkpointing( self : List[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
def test_biogpt_model_past_with_large_inputs( self : List[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
def test_biogpt_weight_initialization( self : int ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
def test_biogpt_token_classification( self : str ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''left'''
# Define PAD Token = EOS Token = 50256
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE : Dict = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE : Any = [
'''Hello, my dog is a little''',
'''Today, I''',
]
SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = inputs['''input_ids'''].to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
input_ids=lowerCAmelCase__ , attention_mask=inputs['''attention_mask'''].to(lowerCAmelCase__ ) , )
SCREAMING_SNAKE_CASE : int = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase ( self : Tuple ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = BioGptModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = 3
SCREAMING_SNAKE_CASE : Dict = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : str = input_ids.ne(1 ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = BioGptForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Optional[Any] = '''multi_label_classification'''
SCREAMING_SNAKE_CASE : Any = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Any = input_ids.ne(1 ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE : List[Any] = BioGptForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : str = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE : Tuple = 4_23_84
SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : Optional[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(lowerCAmelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
**lowerCAmelCase__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
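# A minimal, hedged sketch of the left-padding batched-generation pattern the
# batching test above exercises; names follow the public `transformers` API and
# the snippet is guarded so it only runs when executed directly.
if __name__ == "__main__":
    from transformers import BioGptForCausalLM, BioGptTokenizer

    tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
    tokenizer.padding_side = '''left'''  # pad on the left so generation continues each prompt
    tokenizer.pad_token = tokenizer.eos_token
    model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
    model.config.pad_token_id = model.config.eos_token_id
    batch = tokenizer(['''Hello, my dog is a little''', '''Today, I'''], return_tensors='''pt''', padding=True)
    outputs = model.generate(input_ids=batch['''input_ids'''], attention_mask=batch['''attention_mask'''])
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))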
| 464 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
class A__ :
def __init__( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase=None ) -> str:
"""simple docstring"""
if not conversation_id:
            __magic_name__ : Any = uuid.uuid4()
if past_user_inputs is None:
__magic_name__ : Dict = []
if generated_responses is None:
__magic_name__ : Tuple = []
__magic_name__ : uuid.UUID = conversation_id
__magic_name__ : List[str] = past_user_inputs
__magic_name__ : List[str] = generated_responses
__magic_name__ : Optional[str] = text
def __eq__( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
if not isinstance(lowerCamelCase , lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowercase ( self , lowerCamelCase , lowerCamelCase = False ) -> Tuple:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
__magic_name__ : Tuple = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
__magic_name__ : Union[str, Any] = text
def lowercase ( self ) -> Tuple:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__magic_name__ : List[Any] = None
def lowercase ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(lowerCamelCase )
def lowercase ( self ) -> Tuple:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Dict:
"""simple docstring"""
__magic_name__ : List[Any] = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
__magic_name__ : str = '''user''' if is_user else '''bot'''
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
__SCREAMING_SNAKE_CASE , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
super().__init__(*lowerCamelCase , **lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__magic_name__ : Optional[int] = self.tokenizer.eos_token
def lowercase ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ : Dict = {}
__magic_name__ : Union[str, Any] = {}
__magic_name__ : List[str] = {}
if min_length_for_response is not None:
__magic_name__ : Any = min_length_for_response
if minimum_tokens is not None:
__magic_name__ : Any = minimum_tokens
if "max_length" in generate_kwargs:
__magic_name__ : Optional[Any] = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__magic_name__ : Optional[int] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , lowerCamelCase , lowerCamelCase=0 , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
__magic_name__ : Tuple = super().__call__(lowerCamelCase , num_workers=lowerCamelCase , **lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) == 1:
return outputs[0]
return outputs
def lowercase ( self , lowerCamelCase , lowerCamelCase=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowerCamelCase , lowerCamelCase ):
            raise ValueError('''ConversationalPipeline expects a Conversation as input''' )
if conversation.new_user_input is None:
raise ValueError(
                F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__magic_name__ : int = self.tokenizer._build_conversation_input_ids(lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__magic_name__ : Union[str, Any] = self._legacy_parse_and_tokenize(lowerCamelCase )
if self.framework == "pt":
__magic_name__ : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__magic_name__ : List[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowercase ( self , lowerCamelCase , lowerCamelCase=10 , **lowerCamelCase ) -> Dict:
"""simple docstring"""
__magic_name__ : List[str] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__magic_name__ : Union[str, Any] = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
__magic_name__ : Optional[Any] = max_length - minimum_tokens
__magic_name__ : int = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__magic_name__ : Tuple = model_inputs['''attention_mask'''][:, -trim:]
__magic_name__ : List[str] = model_inputs.pop('''conversation''' )
__magic_name__ : Tuple = max_length
__magic_name__ : List[Any] = self.model.generate(**lowerCamelCase , **lowerCamelCase )
if self.model.config.is_encoder_decoder:
__magic_name__ : Optional[int] = 1
else:
__magic_name__ : List[str] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowercase ( self , lowerCamelCase , lowerCamelCase=True ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : List[Any] = model_outputs['''output_ids''']
__magic_name__ : Any = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , )
__magic_name__ : str = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(lowerCamelCase )
return conversation
def lowercase ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
__magic_name__ : List[Any] = self.tokenizer.eos_token_id
__magic_name__ : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) )
if len(lowerCamelCase ) > self.tokenizer.model_max_length:
__magic_name__ : Tuple = input_ids[-self.tokenizer.model_max_length :]
return input_ids
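# A hedged usage sketch of the conversational pipeline implemented above, via the
# public `transformers` entry points these classes mirror (the DialoGPT
# checkpoint is an illustrative choice, not part of this file):
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline('''conversational''' , model='''microsoft/DialoGPT-small''')
    conversation = Conversation('''Going to the movies tonight - any suggestions?''')
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])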
| 154 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase_ = logging.get_logger(__name__)
lowercase_ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase = None, UpperCAmelCase = None, UpperCAmelCase = None, UpperCAmelCase = None, UpperCAmelCase = "first_exhausted", ) ->DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(UpperCAmelCase ):
if not isinstance(UpperCAmelCase, (Dataset, IterableDataset) ):
if isinstance(UpperCAmelCase, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(UpperCAmelCase )}\n'''
F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCAmelCase ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase ).__name__}.''' )
if i == 0:
__magic_name__ , __magic_name__ : Union[str, Any] = (
(Dataset, IterableDataset) if isinstance(UpperCAmelCase, UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCAmelCase, UpperCAmelCase ):
raise ValueError(
F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, info=UpperCAmelCase, split=UpperCAmelCase, stopping_strategy=UpperCAmelCase )
else:
return _interleave_iterable_datasets(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, info=UpperCAmelCase, split=UpperCAmelCase, stopping_strategy=UpperCAmelCase )
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase = None, UpperCAmelCase = None, UpperCAmelCase = 0, ) ->DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(UpperCAmelCase ):
if not isinstance(UpperCAmelCase, (Dataset, IterableDataset) ):
if isinstance(UpperCAmelCase, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(UpperCAmelCase )}\n'''
                F'''Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(UpperCAmelCase ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase ).__name__}.''' )
if i == 0:
__magic_name__ , __magic_name__ : int = (
(Dataset, IterableDataset) if isinstance(UpperCAmelCase, UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCAmelCase, UpperCAmelCase ):
raise ValueError(
            F'''Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCAmelCase, info=UpperCAmelCase, split=UpperCAmelCase, axis=UpperCAmelCase )
else:
return _concatenate_iterable_datasets(UpperCAmelCase, info=UpperCAmelCase, split=UpperCAmelCase, axis=UpperCAmelCase )
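# A minimal sketch of driving the two helpers above through the public `datasets`
# API (the toy rows, probabilities, and seed are illustrative):
if __name__ == "__main__":
    from datasets import Dataset, concatenate_datasets, interleave_datasets

    d1 = Dataset.from_dict({'''a''': [0, 1, 2]})
    d2 = Dataset.from_dict({'''a''': [10, 11, 12]})
    mixed = interleave_datasets([d1, d2] , probabilities=[0.5, 0.5] , seed=42 , stopping_strategy='''all_exhausted''')
    joined = concatenate_datasets([d1, d2])
    print(mixed['''a'''] , joined['''a'''])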
| 154 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase =logging.get_logger(__name__)
lowercase ={
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="blip_2_vision_model"
def __init__( self , snake_case=1_4_0_8 , snake_case=6_1_4_4 , snake_case=3_9 , snake_case=1_6 , snake_case=2_2_4 , snake_case=1_4 , snake_case="gelu" , snake_case=0.0_00_01 , snake_case=0.0 , snake_case=1E-1_0 , snake_case=True , **snake_case , ) -> List[str]:
'''simple docstring'''
super().__init__(**snake_case)
_UpperCAmelCase : Any =hidden_size
_UpperCAmelCase : int =intermediate_size
_UpperCAmelCase : List[Any] =num_hidden_layers
_UpperCAmelCase : int =num_attention_heads
_UpperCAmelCase : List[str] =patch_size
_UpperCAmelCase : Optional[int] =image_size
_UpperCAmelCase : Any =initializer_range
_UpperCAmelCase : Any =attention_dropout
_UpperCAmelCase : Optional[int] =layer_norm_eps
_UpperCAmelCase : Any =hidden_act
_UpperCAmelCase : Dict =qkv_bias
@classmethod
def lowerCAmelCase ( cls , snake_case , **snake_case) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case)
        config_dict , kwargs = cls.get_config_dict(snake_case , **snake_case)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="blip_2_qformer"
def __init__( self , snake_case=3_0_5_2_2 , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=0.02 , snake_case=1E-1_2 , snake_case=0 , snake_case="absolute" , snake_case=2 , snake_case=1_4_0_8 , **snake_case , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=snake_case , **snake_case)
_UpperCAmelCase : int =vocab_size
_UpperCAmelCase : Dict =hidden_size
_UpperCAmelCase : List[Any] =num_hidden_layers
_UpperCAmelCase : Optional[int] =num_attention_heads
_UpperCAmelCase : Optional[Any] =hidden_act
_UpperCAmelCase : int =intermediate_size
_UpperCAmelCase : Tuple =hidden_dropout_prob
_UpperCAmelCase : str =attention_probs_dropout_prob
_UpperCAmelCase : Optional[Any] =max_position_embeddings
_UpperCAmelCase : Optional[Any] =initializer_range
_UpperCAmelCase : List[str] =layer_norm_eps
_UpperCAmelCase : Optional[int] =position_embedding_type
_UpperCAmelCase : Optional[Any] =cross_attention_frequency
_UpperCAmelCase : Dict =encoder_hidden_size
@classmethod
def lowerCAmelCase ( cls , snake_case , **snake_case) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case)
        config_dict , kwargs = cls.get_config_dict(snake_case , **snake_case)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="blip-2"
UpperCAmelCase =True
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case=3_2 , **snake_case) -> str:
'''simple docstring'''
super().__init__(**snake_case)
if vision_config is None:
_UpperCAmelCase : List[str] ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
if qformer_config is None:
_UpperCAmelCase : Optional[int] ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
if text_config is None:
_UpperCAmelCase : int ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        _UpperCAmelCase : Union[str, Any] =Blip2VisionConfig(**snake_case)
        _UpperCAmelCase : int =Blip2QFormerConfig(**snake_case)
_UpperCAmelCase : int =text_config['model_type'] if 'model_type' in text_config else 'opt'
_UpperCAmelCase : Union[str, Any] =CONFIG_MAPPING[text_model_type](**snake_case)
_UpperCAmelCase : List[str] =self.text_config.tie_word_embeddings
_UpperCAmelCase : str =self.text_config.is_encoder_decoder
_UpperCAmelCase : str =num_query_tokens
_UpperCAmelCase : Dict =self.vision_config.hidden_size
_UpperCAmelCase : Optional[int] =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCAmelCase : Tuple =1.0
_UpperCAmelCase : List[Any] =0.02
@classmethod
def lowerCAmelCase ( cls , snake_case , snake_case , snake_case , **snake_case , ) -> List[Any]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **snake_case , )
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int =copy.deepcopy(self.__dict__)
_UpperCAmelCase : Optional[int] =self.vision_config.to_dict()
_UpperCAmelCase : Tuple =self.qformer_config.to_dict()
_UpperCAmelCase : int =self.text_config.to_dict()
_UpperCAmelCase : List[str] =self.__class__.model_type
return output
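# A hedged sketch composing the three configurations above through the public
# `transformers` classes they correspond to (Blip2VisionConfig, Blip2QFormerConfig,
# Blip2Config); the OPT text config is an illustrative default choice:
if __name__ == "__main__":
    from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

    config = Blip2Config.from_vision_qformer_text_configs(Blip2VisionConfig() , Blip2QFormerConfig() , OPTConfig())
    print(config.num_query_tokens , config.qformer_config.encoder_hidden_size)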
| 716 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="dpr"
def __init__( self , snake_case=3_0_5_2_2 , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=2 , snake_case=0.02 , snake_case=1E-1_2 , snake_case=0 , snake_case="absolute" , snake_case = 0 , **snake_case , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=snake_case , **snake_case)
_UpperCAmelCase : int =vocab_size
_UpperCAmelCase : Dict =hidden_size
_UpperCAmelCase : List[Any] =num_hidden_layers
_UpperCAmelCase : List[Any] =num_attention_heads
_UpperCAmelCase : str =hidden_act
_UpperCAmelCase : Optional[Any] =intermediate_size
_UpperCAmelCase : Optional[Any] =hidden_dropout_prob
_UpperCAmelCase : Tuple =attention_probs_dropout_prob
_UpperCAmelCase : int =max_position_embeddings
_UpperCAmelCase : Tuple =type_vocab_size
_UpperCAmelCase : Union[str, Any] =initializer_range
_UpperCAmelCase : Tuple =layer_norm_eps
_UpperCAmelCase : int =projection_dim
_UpperCAmelCase : List[Any] =position_embedding_type
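# A short, hedged sketch instantiating the configuration above via the public
# `transformers` class it corresponds to (DPRConfig); projection_dim=0 keeps the
# default behaviour of returning the pooled encoder output without an extra
# projection layer.
if __name__ == "__main__":
    from transformers import DPRConfig

    config = DPRConfig(projection_dim=0)
    print(config.hidden_size , config.projection_dim)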
| 331 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ : List[str] =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Tuple =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : int =do_resize
SCREAMING_SNAKE_CASE_ : Optional[Any] =size
SCREAMING_SNAKE_CASE_ : Any =resample
SCREAMING_SNAKE_CASE_ : Union[str, Any] =do_center_crop
SCREAMING_SNAKE_CASE_ : Optional[Any] =crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] =do_rescale
SCREAMING_SNAKE_CASE_ : Tuple =rescale_factor
SCREAMING_SNAKE_CASE_ : List[Any] =do_normalize
SCREAMING_SNAKE_CASE_ : str =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ : str =image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ : Optional[Any] =do_convert_rgb
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Tuple =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE_ : List[Any] =get_resize_output_image_size(__UpperCAmelCase , size=size['shortest_edge'] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : List[str] =get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : List[Any] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : int =size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : List[Any] =get_size_dict(__UpperCAmelCase , param_name='size' , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : int =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Any =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_size_dict(__UpperCAmelCase , param_name='crop_size' , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Tuple =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : int =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : List[str] =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ : str =make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ : Optional[int] =[convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : List[str] =[to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : List[str] =[self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : int =[self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : str =[self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : int =[self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple ={'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
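# A minimal usage sketch of the preprocessing chain above via the public
# `transformers` class it corresponds to (CLIPImageProcessor); the random image
# stands in for real data.
if __name__ == "__main__":
    import numpy as np
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor()
    image = np.random.randint(0 , 256 , (480, 640, 3) , dtype=np.uint8)
    batch = processor(images=image , return_tensors='pt')
    print(batch['pixel_values'].shape)  # torch.Size([1, 3, 224, 224]) after resize + center crop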
| 220 |
import logging
from transformers.configuration_utils import PretrainedConfig
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = 'masked_bert'
def __init__( self , __UpperCAmelCase=30_522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3_072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="topK" , __UpperCAmelCase="constant" , __UpperCAmelCase=0.0 , **__UpperCAmelCase , ):
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =vocab_size
SCREAMING_SNAKE_CASE_ : Any =hidden_size
SCREAMING_SNAKE_CASE_ : Dict =num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] =hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] =intermediate_size
SCREAMING_SNAKE_CASE_ : str =hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_ : Any =type_vocab_size
SCREAMING_SNAKE_CASE_ : int =initializer_range
SCREAMING_SNAKE_CASE_ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple =pruning_method
SCREAMING_SNAKE_CASE_ : Optional[Any] =mask_init
SCREAMING_SNAKE_CASE_ : Optional[int] =mask_scale
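# `pruning_method="topK"` above refers to score-based pruning in which only the
# highest-scoring fraction of weights is kept. A self-contained sketch of the
# topK masking step (the scores and keep ratio are illustrative):
if __name__ == "__main__":
    import torch

    scores = torch.randn(4 , 4)           # per-weight importance scores
    keep_ratio = 0.25                     # fraction of weights to keep
    k = int(keep_ratio * scores.numel())
    threshold = scores.flatten().topk(k).values.min()
    mask = (scores >= threshold).float()  # 1 keeps a weight, 0 prunes it
    print(int(mask.sum().item()) , 'weights kept out of' , scores.numel())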
| 220 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Tuple , *_A : Union[str, Any] , **_A : Optional[int] ):
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
super().__init__(*_A , **_A )
| 71 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A )
_UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
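    # Why the score takes this form: the model returns the mean per-token
    # cross-entropy over the label tokens, so multiplying by the label length and
    # negating recovers the total log-likelihood log p(labels | input).
    # Illustrative arithmetic: a mean loss of 2.1 over 4 label tokens gives a
    # sequence score of -(4 * 2.1) = -8.4.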
| 71 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase :Tuple = None
lowerCAmelCase :Any = logging.get_logger(__name__)
lowerCAmelCase :int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase :int = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase :Any = {
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
lowerCAmelCase :Optional[Any] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _lowerCamelCase ( A__ ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A_ : int = ["""input_ids""", """attention_mask"""]
A_ : Union[str, Any] = MBartTokenizer
A_ : Optional[int] = []
A_ : int = []
def __init__( self : Optional[int] , _A : List[str]=None , _A : int=None , _A : Optional[int]="<s>" , _A : str="</s>" , _A : Optional[Any]="</s>" , _A : Optional[int]="<s>" , _A : int="<unk>" , _A : Any="<pad>" , _A : Tuple="<mask>" , _A : int=None , _A : int=None , _A : Any=None , **_A : List[Any] , ) -> Any:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
__magic_name__ : Optional[int] = vocab_file
__magic_name__ : Dict = False if not self.vocab_file else True
__magic_name__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__magic_name__ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__magic_name__ : int = src_lang if src_lang is not None else 'en_XX'
__magic_name__ : Tuple = self.convert_tokens_to_ids(self._src_lang )
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCAmelCase ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self : Dict , _A : str ) -> None:
__magic_name__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCAmelCase ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Optional[Any] , _A : Any , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : List[str] ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__magic_name__ : Any = src_lang
__magic_name__ : Optional[Any] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
__magic_name__ : Any = self.convert_tokens_to_ids(__lowerCamelCase )
__magic_name__ : List[Any] = tgt_lang_id
return inputs
def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : str = "en_XX" , _A : Optional[List[str]] = None , _A : str = "ro_RO" , **_A : Dict , ) -> BatchEncoding:
__magic_name__ : Tuple = src_lang
__magic_name__ : Union[str, Any] = tgt_lang
        return super().prepare_seq2seq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
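    # A hedged usage sketch of the translation API above (mirrors the public
    # MBartTokenizerFast; the checkpoint and sentence are illustrative):
    #
    #     tokenizer = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro')
    #     tokenizer.src_lang = 'en_XX'
    #     tokenizer.tgt_lang = 'ro_RO'
    #     batch = tokenizer('UN Chief Says There Is No Military Solution in Syria', return_tensors='pt')
    #     # per set_src_lang_special_tokens below, input_ids end with </s> followed
    #     # by the en_XX language-code token.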
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCAmelCase ( self : List[str] , _A : str ) -> None:
__magic_name__ : Tuple = self.convert_tokens_to_ids(__lowerCamelCase )
__magic_name__ : Optional[Any] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
__magic_name__ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
__magic_name__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__magic_name__ : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self : str , _A : str ) -> None:
__magic_name__ : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase )
__magic_name__ : List[str] = []
__magic_name__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
__magic_name__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
__magic_name__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__magic_name__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self : int , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
__magic_name__ : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
        return (out_vocab_file,)
| 561 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_SCREAMING_SNAKE_CASE : List[Any] = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
_SCREAMING_SNAKE_CASE : Tuple = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = cn.convert_to_negative(_A )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase_ ( ):
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_A , 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
SCREAMING_SNAKE_CASE__ = canny.canny(_A )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase_ ( ):
'''simple docstring'''
assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all()
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
SCREAMING_SNAKE_CASE__ = conv.img_convolve(_A , _A ).astype(_A )
assert res.any()
def UpperCAmelCase_ ( ):
'''simple docstring'''
assert med.median_filter(_A , 3 ).any()
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = sob.sobel_filter(_A )
assert grad.any() and theta.any()
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = sp.make_sepia(_A , 20 )
assert sepia.all()
def UpperCAmelCase_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = bs.Burkes(imread(_A , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = rs.NearestNeighbour(imread(_A , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE__ = imread(_A , 0 )
# Test for get_neighbors_pixel function() return not None
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE__ = lbp.get_neighbors_pixel(
_A , _A , _A , _A )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
SCREAMING_SNAKE_CASE__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
SCREAMING_SNAKE_CASE__ = lbp.local_binary_value(_A , _A , _A )
assert lbp_image.any()
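# A self-contained sketch of the local-binary-pattern idea exercised by the test
# above: each neighbour at least as bright as the centre contributes one bit.
# The bit ordering and the >=/> convention vary between implementations, so
# treat the values below as illustrative.
if __name__ == "__main__":
    patch = [[6, 5, 2], [7, 6, 1], [9, 8, 7]]
    center = patch[1][1]
    neighbors = [patch[0][0], patch[0][1], patch[0][2], patch[1][2],
                 patch[2][2], patch[2][1], patch[2][0], patch[1][0]]
    lbp_value = sum(int(p >= center) << i for i, p in enumerate(neighbors))
    print(lbp_value)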
| 493 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3_144_598
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28  # NB: the function's error message expects kg/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 702 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = KandinskyInpaintPipeline
__snake_case : Dict = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
__snake_case : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__snake_case : int = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__snake_case : Optional[int] = False
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
return 100
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
_SCREAMING_SNAKE_CASE = MultilingualCLIP(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        _SCREAMING_SNAKE_CASE = UNet2DConditionModel(**UpperCAmelCase_ )
return model
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase ( self: int , UpperCAmelCase_: Any , UpperCAmelCase_: str=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCAmelCase_ )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(UpperCAmelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
        _SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.float32 )
_SCREAMING_SNAKE_CASE = 0
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu"""
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
'''simple docstring'''
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 569 | 0 |
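# The fast test above compares a 3x3 corner slice of the generated image against
# reference values. A minimal, self-contained sketch of that check; the array and
# the expected values below are made up for illustration, not real pipeline output.
import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for pipe(**inputs).images
expected_slice = np.zeros(9, dtype=np.float32)      # hypothetical reference from a known-good run

image_slice = image[0, -3:, -3:, -1]                # 3x3 corner of the last channel
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2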
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 527 |
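# The lazy-init module above defers heavy imports until an attribute is first
# requested. A rough sketch of the same idea using PEP 562 module-level
# __getattr__; the submodule and class names here are hypothetical.
import importlib

_import_structure = {"configuration_foo": ["FooConfig"]}

def __getattr__(name):
    # Import the owning submodule only when one of its attributes is accessed.
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module("." + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")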
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__( self , parent , batch_size = 1_3 , image_size = 6_4 , patch_size = 2 , num_channels = 3 , embed_dim = 3 , is_training = True , use_labels = True , hidden_size = 1_2_8 , hidden_sizes=[1_6, 3_2, 6_4, 1_2_8] , num_hidden_layers = 7 , num_attention_heads = 4 , intermediate_size = 3_7 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , type_sequence_label_size = 1_0 , initializer_range = 0.02 , encoder_stride = 2 , num_attention_outputs = 1 , dim = 1_2_8 , depths = [2, 2, 2, 2] , resolution = 2 , mlp_expansion_ratio = 2 , ):
_SCREAMING_SNAKE_CASE : Dict = parent
_SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
_SCREAMING_SNAKE_CASE : List[Any] = image_size
_SCREAMING_SNAKE_CASE : int = patch_size
_SCREAMING_SNAKE_CASE : List[str] = num_channels
_SCREAMING_SNAKE_CASE : List[Any] = is_training
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
_SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Dict = num_attention_heads
_SCREAMING_SNAKE_CASE : Any = intermediate_size
_SCREAMING_SNAKE_CASE : str = hidden_act
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
_SCREAMING_SNAKE_CASE : int = encoder_stride
_SCREAMING_SNAKE_CASE : Tuple = num_attention_outputs
_SCREAMING_SNAKE_CASE : Any = embed_dim
_SCREAMING_SNAKE_CASE : str = embed_dim + 1
_SCREAMING_SNAKE_CASE : Union[str, Any] = resolution
_SCREAMING_SNAKE_CASE : int = depths
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes
_SCREAMING_SNAKE_CASE : Optional[Any] = dim
_SCREAMING_SNAKE_CASE : Any = mlp_expansion_ratio
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFEfficientFormerModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            if hasattr(self.model_tester , 'encoder_seq_length' ):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple) )
                self.assertEqual(len(hidden_states ) , expected_num_layers )
                seq_len = getattr(self.model_tester , 'seq_length' , None )
                decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , seq_len )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , 'seq_length' , None )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , seq_len )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        chunk_length = getattr(self.model_tester , 'chunk_length' , None )
        if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model( self ):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            dummy_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(dummy_inputs )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_image_classification_head_with_teacher( self ):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 621 | 0 |
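# The forward-signature test above reduces to this inspect pattern; DummyModel
# is a stand-in for a real TF model class, used only for illustration.
import inspect

class DummyModel:
    def call(self, pixel_values, training=False):
        return pixel_values

# On a bound method, `self` is excluded from the signature.
sig = inspect.signature(DummyModel().call)
arg_names = [*sig.parameters.keys()]
assert arg_names[:1] == ["pixel_values"]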
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ):
    """simple docstring"""
    return compare_versions(SCREAMING_SNAKE_CASE , operation , version )
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ):
    """simple docstring"""
    return compare_versions(SCREAMING_SNAKE_CASE , operation , version )
| 8 | 1 |
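# A small usage sketch of the version-comparison pattern above, written against
# packaging alone so it runs without torch installed; names here are my own.
import operator
from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": operator.gt, ">=": operator.ge, "==": operator.eq,
                         "!=": operator.ne, "<=": operator.le, "<": operator.lt}

def compare(version: str, operation: str, requirement_version: str) -> bool:
    return STR_OPERATION_TO_FUNC[operation](parse(version), parse(requirement_version))

assert compare("2.1.0", ">=", "1.10")
assert not compare("1.9.1", ">=", "1.10")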
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 181 |
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 123 | 0 |
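# Why the linear-layer branch of the PyTorch-to-Flax converter transposes the
# weight: PyTorch stores nn.Linear weights as (out_features, in_features), while
# Flax Dense kernels are (in_features, out_features). A numpy-only sketch:
import numpy as np

pt_weight = np.arange(6.0).reshape(2, 3)  # PyTorch layout: (out=2, in=3)
flax_kernel = pt_weight.T                 # Flax layout:    (in=3, out=2)
x = np.ones(3)
assert np.allclose(pt_weight @ x, x @ flax_kernel)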
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4 | 718 |
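# Quick self-check of the two BFS helpers above (assuming they are in scope):
# a shortest path with k edges always has k + 1 nodes.
tiny_graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A", "D"], "D": ["B", "C"]}
path = bfs_shortest_path(tiny_graph, "A", "D")               # e.g. ['A', 'B', 'D']
distance = bfs_shortest_path_distance(tiny_graph, "A", "D")  # 2
assert len(path) == distance + 1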
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 664 | 0 |
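# A tiny sanity case for the articulation-point routine above (assuming
# compute_ap is in scope): in the path graph 0 - 1 - 2, the middle vertex is the
# only cut vertex, so only "1" should be printed.
path_graph = {0: [1], 1: [0, 2], 2: [1]}
compute_ap(path_graph)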
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
lowerCamelCase_ = rescale_embeddings
lowerCamelCase_ = attention_type
lowerCamelCase_ = use_bias
lowerCamelCase_ = block_size
lowerCamelCase_ = num_random_blocks
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        """simple docstring"""
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        """simple docstring"""
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        """simple docstring"""
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        """simple docstring"""
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        """simple docstring"""
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        """simple docstring"""
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 70 |
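# The JIT-enabled / JIT-disabled comparison in the test above relies on jax.jit
# matching eager execution; a minimal sketch of that contract (requires jax).
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return jnp.tanh(x) * 2.0

x = jnp.arange(4.0)
with jax.disable_jit():
    eager = f(x)   # runs op-by-op
jitted = f(x)      # runs the compiled version
assert jnp.allclose(eager, jitted)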
from __future__ import annotations

solution = []

def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # Check the target row and column for an existing queen.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Check both upward diagonals.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True

def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False

def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()

# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 147 | 0 |
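# Known solution counts (2 for n=4, 92 for n=8) make a handy cross-check for the
# backtracking solver above. This independent sketch of mine counts placements
# with set-based pruning instead of printing boards.
def count_queens(n, row=0, cols=frozenset(), diag1=frozenset(), diag2=frozenset()):
    if row == n:
        return 1
    total = 0
    for col in range(n):
        if col not in cols and row - col not in diag1 and row + col not in diag2:
            total += count_queens(
                n, row + 1, cols | {col}, diag1 | {row - col}, diag2 | {row + col}
            )
    return total

assert count_queens(4) == 2
assert count_queens(8) == 92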
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int =ViTImageProcessor if is_vision_available() else None
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ):
        self.image_size = (3, 32, 1_28)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 1_28},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_input = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "test"
__lowerCAmelCase = processor(text=__a )
__lowerCAmelCase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "test"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.char_decode(__a )
__lowerCAmelCase = tokenizer.batch_decode(__a )
__lowerCAmelCase = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = None
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = torch.randn(1 , 27 , 38 )
__lowerCAmelCase = torch.randn(1 , 27 , 5_02_57 )
__lowerCAmelCase = torch.randn(1 , 27 , 3_05_22 )
__lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 701 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    dice_set = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(dice_set, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
| 282 | 0 |
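# The product() enumeration above is exponential in dice_number; the same
# frequency table can be built in polynomial time by repeatedly convolving
# single-die distributions. A pure-Python sketch of that alternative:
def dice_distribution(sides: int, dice: int) -> list[int]:
    freq = [1]  # frequency table of the empty sum: total 0 occurs once
    for _ in range(dice):
        new_freq = [0] * (len(freq) + sides)
        for total, count in enumerate(freq):
            for face in range(1, sides + 1):
                new_freq[total + face] += count
        freq = new_freq
    return freq

assert sum(dice_distribution(4, 9)) == 4**9  # all 4^9 outcomes accounted for
assert sum(dice_distribution(6, 6)) == 6**6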
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    '''simple docstring'''
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    '''simple docstring'''
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
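# A 3x3 demo for the maze solver above (0 = open cell, 1 = wall), assuming
# solve_maze is in scope; it should print the visited-path matrix.
demo_maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 0, 0],
]
solve_maze(demo_maze)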
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase : Tuple = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : List[Any] = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )
def lowerCamelCase__ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : Dict=None ):
'''simple docstring'''
__UpperCAmelCase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
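# --- Added usage sketch (an illustration, not part of the original file) ---
# Assuming the class above mirrors transformers' BartTokenizerFast, the last two
# methods implement BART's special-token layout: a single sequence is wrapped as
# <s> A </s>, a pair as <s> A </s></s> B </s>, and token_type_ids are all zeros
# because BART has no segment embeddings. For example:
#
#     from transformers import BartTokenizerFast
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     print(tok("Hello", "world")["input_ids"])  # [0, ..., 2, 2, ..., 2]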
| 139 | 0 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
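# --- Added worked example (an illustration, not part of the original file) ---
# The classic two-state "Healthy"/"Fever" HMM; the most probable hidden path for
# the observations ("normal", "cold", "dizzy") is ['Healthy', 'Healthy', 'Fever'].
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     print(viterbi(observations, states, start_p, trans_p, emit_p))
#     # ['Healthy', 'Healthy', 'Fever']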
| 702 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class A__( __magic_name__ , unittest.TestCase ):
lowerCAmelCase = XLMRobertaTokenizer
lowerCAmelCase = XLMRobertaTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = True
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys), 10_02)
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def _a ( self : int ) -> int:
"""simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@cached_property
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)

            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
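        # --- Added note (not in the original tests) ---
        # The round trip is expected to work because the slow tokenizer
        # serializes the raw SentencePiece model state in __getstate__ and
        # rebuilds the SentencePieceProcessor on unpickling, so the temporary
        # file is no longer needed after construction (behavior assumed from
        # the transformers slow-tokenizer implementations).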
def _a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''Hello World!'''
__SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__SCREAMING_SNAKE_CASE = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
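# --- Added explanatory sketch (an illustration, not part of the original tests) ---
# XLM-R maps raw SentencePiece piece ids onto fairseq vocabulary ids with a fixed
# shift: fairseq reserves the low ids for <s>/<pad>/</s>/<unk>, so the tests above
# write expected ids as `value + tokenizer.fairseq_offset`. Roughly:
#
#     sp_id = tokenizer.sp_model.PieceToId("▁This")  # raw SentencePiece id
#     hf_id = sp_id + tokenizer.fairseq_offset       # id the model embedding expects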
| 690 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
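# --- Added compatibility note (not in the original file) ---
# The graph code above targets the TensorFlow 1.x API: tf.Session, tf.placeholder
# and tf.initialize_all_variables were removed in TF 2.x, and tf.sub was renamed
# tf.subtract even within the 1.x line. A minimal sketch to run it today, assuming
# a TF 2.x install with the compatibility shim:
#
#     import tensorflow.compat.v1 as tf
#     tf.disable_v2_behavior()
#     # ...then replace tf.sub with tf.subtract and
#     # tf.initialize_all_variables with tf.global_variables_initializer.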
| 680 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
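# --- Added usage sketch (an illustration, not part of the original file) ---
# With the lazy module installed in sys.modules, heavy imports are deferred until
# a symbol is first touched; assuming this file is transformers' speecht5
# __init__.py, the following only triggers the torch-dependent import on access:
#
#     from transformers.models.speecht5 import SpeechT5Config  # cheap
#     config = SpeechT5Config()                                # resolves lazily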
| 680 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = CodeGenTokenizer
lowerCamelCase__ : Tuple = CodeGenTokenizerFast
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[str] = {'add_prefix_space': True}
lowerCamelCase__ : Union[str, Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'lower newer'
lowerCamelCase__ : int = 'lower newer'
return input_text, output_text
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowerCamelCase__ : Dict = 'lower newer'
lowerCamelCase__ : str = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase__ : Tuple = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Union[str, Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : str = 'lower newer'
# Testing tokenization
lowerCamelCase__ : Tuple = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids without special tokens
lowerCamelCase__ : Any = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids with special tokens
lowerCamelCase__ : Dict = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Dict = tokenizer.encode(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing the unknown token
lowerCamelCase__ : List[str] = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ : Optional[int] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
def a__ (self, lowerCamelCase_=1_5 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_, **lowerCamelCase_ )
# Simple input
lowerCamelCase__ : Optional[int] = 'This is a simple input'
lowerCamelCase__ : Any = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : Tuple = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Simple input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Simple input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length', )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Pair input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length', )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>' )
# Simple input
lowerCamelCase__ : Optional[Any] = 'This is a simple input'
lowerCamelCase__ : Any = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase__ : Tuple = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : List[str] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase__ : Union[str, Any] = tokenizer.pad_token_id
lowerCamelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_, padding='max_length', max_length=3_0, return_tensors='np' )
lowerCamelCase__ : int = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : Optional[int] = tokenizer(*lowerCamelCase_, padding='max_length', max_length=6_0, return_tensors='np' )
lowerCamelCase__ : Optional[int] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = '$$$'
lowerCamelCase__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCamelCase_, add_bos_token=lowerCamelCase_ )
lowerCamelCase__ : List[str] = 'This is a simple input'
lowerCamelCase__ : Any = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : Dict = tokenizer.bos_token_id
lowerCamelCase__ : Optional[int] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0], lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ : Tuple = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
lowerCamelCase__ : str = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
lowerCamelCase__ : int = '\nif len_a > len_b: result = a\nelse: result = b'
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
lowerCamelCase__ : List[Any] = tokenizer.decode(lowerCamelCase_, truncate_before_pattern=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
pass
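# --- Added explanatory sketch (an illustration, not part of the original tests) ---
# The last slow test exercises CodeGen's decode(..., truncate_before_pattern=...):
# decoding stops at the first match of any regex in the list, which is how code
# completions are cut at comments, docstrings, or runs of blank lines, e.g.:
#
#     text = tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])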
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0.0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
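# --- Added worked example (an illustration, not part of the original file) ---
# For text = "ab ab" the single-character counts are {'a': 2, 'b': 2, ' ': 1},
# so the first-order Shannon entropy is
#     H1 = -(2/5*log2(2/5) + 2/5*log2(2/5) + 1/5*log2(1/5)) ≈ 1.52 bits,
# which calculate_prob prints (rounded) as -1 * my_fir_sum.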
| 696 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''LayoutLMv2ImageProcessor'''
lowerCAmelCase = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__A : str = kwargs.pop('feature_extractor')
__A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_UpperCAmelCase , _UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
# first, apply the image processor
__A : Dict = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__A : Dict = features['words']
__A : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__A : Dict = features.pop('pixel_values')
if return_overflowing_tokens is True:
__A : str = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'])
__A : str = images
return encoded_inputs
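    # --- Added usage sketch (an illustration, not part of the original file) ---
    # Assuming this mirrors transformers' LayoutXLMProcessor, a typical call with
    # the image processor's built-in OCR looks like:
    #
    #     from transformers import LayoutXLMProcessor
    #     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    #     encoding = processor(image, return_tensors="pt")
    #     # -> input_ids, bbox, attention_mask and image, matching the
    #     #    model input names property below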
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(_UpperCAmelCase)} and {len(_UpperCAmelCase)}')
return images_with_overflow
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor | 8 |
'''simple docstring'''
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
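# --- Added usage sketch (an illustration, not part of the original file) ---
# Typical invocation, assuming an s3prl downstream checkpoint trained on top of a
# UniSpeechSAT base model (script name and paths are placeholders):
#
#     python convert_unispeech_sat_s3prl_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model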
| 685 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCamelCase__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__="None" , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Any:
"""simple docstring"""
_UpperCamelCase :int =parent
_UpperCamelCase :Any =batch_size
_UpperCamelCase :List[str] =seq_length
_UpperCamelCase :Dict =is_training
_UpperCamelCase :Optional[Any] =use_input_mask
_UpperCamelCase :Optional[Any] =use_token_type_ids
_UpperCamelCase :str =use_labels
_UpperCamelCase :List[str] =vocab_size
_UpperCamelCase :str =hidden_size
_UpperCamelCase :List[str] =num_hidden_layers
_UpperCamelCase :str =num_attention_heads
_UpperCamelCase :Dict =intermediate_size
_UpperCamelCase :Union[str, Any] =hidden_act
_UpperCamelCase :str =hidden_dropout_prob
_UpperCamelCase :Union[str, Any] =attention_probs_dropout_prob
_UpperCamelCase :Union[str, Any] =max_position_embeddings
_UpperCamelCase :List[str] =type_vocab_size
_UpperCamelCase :List[Any] =type_sequence_label_size
_UpperCamelCase :Optional[Any] =initializer_range
_UpperCamelCase :int =num_labels
_UpperCamelCase :Optional[Any] =num_choices
_UpperCamelCase :Any =relative_attention
_UpperCamelCase :str =position_biased_input
_UpperCamelCase :Any =pos_att_type
_UpperCamelCase :List[Any] =scope
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase :List[Any] =None
if self.use_input_mask:
_UpperCamelCase :Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase :List[str] =None
if self.use_token_type_ids:
_UpperCamelCase :List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase :Tuple =None
_UpperCamelCase :List[Any] =None
_UpperCamelCase :str =None
if self.use_labels:
_UpperCamelCase :Any =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase :List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase :List[Any] =DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
"""simple docstring"""
_UpperCamelCase :List[Any] =TFDebertaVaModel(config=UpperCamelCase_ )
_UpperCamelCase :Dict ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase :int =[input_ids, input_mask]
_UpperCamelCase :Any =model(UpperCamelCase_ )
_UpperCamelCase :Any =model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =TFDebertaVaForMaskedLM(config=UpperCamelCase_ )
_UpperCamelCase :Optional[int] ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase :Any =model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :List[Any] =self.num_labels
_UpperCamelCase :Any =TFDebertaVaForSequenceClassification(config=UpperCamelCase_ )
_UpperCamelCase :List[str] ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase :Optional[Any] =model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.num_labels
_UpperCamelCase :str =TFDebertaVaForTokenClassification(config=UpperCamelCase_ )
_UpperCamelCase :List[str] ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase :Dict =model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =TFDebertaVaForQuestionAnswering(config=UpperCamelCase_ )
_UpperCamelCase :Tuple ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase :Union[str, Any] =model(UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
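# --- Added note (not in the original tests) ---
# prepare_config_and_inputs_for_common() above repacks the tester's synthetic
# batch into the {input_ids, token_type_ids, attention_mask} dict shape that the
# shared TFModelTesterMixin drives the common tests with; label tensors are kept
# out of the common inputs on purpose.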
@require_tf
class lowerCamelCase__ ( __snake_case , __snake_case , unittest.TestCase ):
__UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :List[Any] =TFDebertaVaModelTester(self )
_UpperCamelCase :Any =ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCamelCase_ )
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Any =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_UpperCamelCase :Optional[Any] =tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_UpperCamelCase :Dict =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase :List[str] =model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
_UpperCamelCase :Any =tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1e-4 ) | 710 | '''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCamelCase : str = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class lowerCamelCase__ ( tr.AbstractTransform ):
def __init__( self , lowerCAmelCase__ = " " ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Dict =sentence_delimiter
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Dict:
"""simple docstring"""
return list(lowerCAmelCase__ )
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :int =[]
for sent_idx, sentence in enumerate(lowerCAmelCase__ ):
chars.extend(self.process_string(lowerCAmelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
_lowerCamelCase : Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
_lowerCamelCase : str = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_lowerCamelCase : int = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_lowerCamelCase : Tuple = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_lowerCamelCase : Optional[int] = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]:
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )["wer"]
_UpperCamelCase :str =0
_UpperCamelCase :Tuple =0
for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :Optional[int] =jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 512 | 0 |
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod() | 483 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase_ ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self ):
"""simple docstring"""
a_ = 1
a_ = 3
a_ = (32, 32)
a_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(_UpperCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
def extract(*_UpperCAmelCase , **_UpperCAmelCase ):
class lowercase_ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
a_ = torch.ones([0] )
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
self.pixel_values.to(_UpperCAmelCase )
return self
return Out()
return extract
def lowercase__ ( self ):
"""simple docstring"""
a_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ = self.dummy_cond_unet
a_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
a_ = self.dummy_vae
a_ = self.dummy_text_encoder
a_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
a_ = StableDiffusionPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
a_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a_ = """A painting of a squirrel eating a burger"""
a_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
a_ = sd_pipe([prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
a_ = output.images
a_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_UpperCAmelCase , )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a_ = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
a_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ = self.dummy_cond_unet
a_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
a_ = self.dummy_vae
a_ = self.dummy_text_encoder
a_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
a_ = StableDiffusionPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
a_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a_ = """A painting of a squirrel eating a burger"""
a_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
a_ = sd_pipe([prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
a_ = output.images
a_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_UpperCAmelCase , )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a_ = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
a_ = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(pipe.scheduler , _UpperCAmelCase )
assert pipe.safety_checker is None
a_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
a_ = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.dummy_cond_unet
a_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
a_ = self.dummy_vae
a_ = self.dummy_text_encoder
a_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
a_ = unet.half()
a_ = vae.half()
a_ = bert.half()
# make sure here that pndm scheduler skips prk
a_ = StableDiffusionPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
a_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a_ = """A painting of a squirrel eating a burger"""
a_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase_ ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
a_ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_UpperCAmelCase )
a_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a_ = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
a_ = 4_003_660_346
a_ = 7
# without safety guidance (sld_guidance_scale = 0)
a_ = torch.manual_seed(_UpperCAmelCase )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
a_ = torch.manual_seed(_UpperCAmelCase )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
a_ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_UpperCAmelCase )
a_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a_ = """padme amidala taking a bath artwork, safe for work, no nudity"""
a_ = 2_734_971_755
a_ = 7
a_ = torch.manual_seed(_UpperCAmelCase )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
a_ = torch.manual_seed(_UpperCAmelCase )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
a_ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
a_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
a_ = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
a_ = 1_044_355_234
a_ = 12
a_ = torch.manual_seed(_UpperCAmelCase )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
a_ = torch.manual_seed(_UpperCAmelCase )
a_ = sd_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 483 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class UpperCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self , **_lowerCAmelCase ):
return {}, {}, {}
def lowerCAmelCase__ ( self , _lowerCAmelCase ):
a =load_image(_lowerCAmelCase )
a =image.size
a =self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self , _lowerCAmelCase ):
a =self.model(**_lowerCAmelCase )
return model_outputs
def lowerCAmelCase__ ( self , _lowerCAmelCase ):
a =model_outputs.predicted_depth
a =torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=_lowerCAmelCase )
a =prediction.squeeze().cpu().numpy()
a =(output * 255 / np.max(_lowerCAmelCase )).astype("""uint8""" )
a =Image.fromarray(_lowerCAmelCase )
a ={}
a =predicted_depth
a =depth
return output_dict
| 321 |
_lowerCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def lowerCamelCase ( )-> None:
"""simple docstring"""
a =input("""Enter message: """ )
a =input("""Enter key [alphanumeric]: """ )
a =input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a ="""encrypt"""
a =encrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
elif mode.lower().startswith("""d""" ):
a ="""decrypt"""
a =decrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase_ )
def lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str )-> str:
"""simple docstring"""
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str )-> str:
"""simple docstring"""
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str )-> str:
"""simple docstring"""
a =[]
a =0
a =key.upper()
for symbol in message:
a =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase_ ):
a =0
else:
translated.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 321 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 100 , ) -> float:
snake_case__ = x_start
snake_case__ = fnc(__lowerCAmelCase )
snake_case__ = 0.0
for _ in range(__lowerCAmelCase ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case__ = (x_end - x_start) / steps + xa
snake_case__ = fnc(__lowerCAmelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case__ = xa
snake_case__ = fxa
return length
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowerCamelCase__ : List[str] = 1_0
while i <= 1_0_0_0_0_0:
print(F"""With {i} steps: {line_length(f, -1_0, 1_0, i)}""")
i *= 1_0
| 33 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( __lowercase ):
"""simple docstring"""
snake_case_ = ['pixel_values']
def __init__( self : Optional[int] , a_ : bool = True , a_ : int = 32 , a_ : Dict=PILImageResampling.BILINEAR , a_ : bool = True , **a_ : Union[str, Any] , ):
"""simple docstring"""
lowerCamelCase__ = do_resize
lowerCamelCase__ = do_rescale
lowerCamelCase__ = size_divisor
lowerCamelCase__ = resample
super().__init__(**a_ )
def _UpperCamelCase ( self : List[str] , a_ : np.ndarray , a_ : int , a_ : str , a_ : Optional[ChannelDimension] = None , **a_ : List[str] ):
"""simple docstring"""
lowerCamelCase__ = get_image_size(a_ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCamelCase__ = height // size_divisor * size_divisor
lowerCamelCase__ = width // size_divisor * size_divisor
lowerCamelCase__ = resize(a_ , (new_h, new_w) , resample=a_ , data_format=a_ , **a_ )
return image
def _UpperCamelCase ( self : str , a_ : np.ndarray , a_ : float , a_ : Optional[ChannelDimension] = None , **a_ : Union[str, Any] ):
"""simple docstring"""
return rescale(image=a_ , scale=a_ , data_format=a_ , **a_ )
def _UpperCamelCase ( self : Union[str, Any] , a_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Tuple=None , a_ : Optional[bool] = None , a_ : Optional[Union[TensorType, str]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : Dict , ):
"""simple docstring"""
lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ = size_divisor if size_divisor is not None else self.size_divisor
lowerCamelCase__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
lowerCamelCase__ = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
lowerCamelCase__ = [to_numpy_array(a_ ) for img in images]
if do_resize:
lowerCamelCase__ = [self.resize(a_ , size_divisor=a_ , resample=a_ ) for image in images]
if do_rescale:
lowerCamelCase__ = [self.rescale(a_ , scale=1 / 2_55 ) for image in images]
lowerCamelCase__ = [to_channel_dimension_format(a_ , a_ ) for image in images]
lowerCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 706 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowercase :
"""simple docstring"""
def __init__( self : Optional[int] , a_ : list[tuple[float, float]] ):
"""simple docstring"""
lowerCamelCase__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowerCamelCase__ = len(a_ ) - 1
def _UpperCamelCase ( self : Union[str, Any] , a_ : float ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCamelCase__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , a_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(a_ ) , 5 ) == 1
return output_values
def _UpperCamelCase ( self : int , a_ : float ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCamelCase__ = self.basis_function(a_ )
lowerCamelCase__ = 0.0
lowerCamelCase__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _UpperCamelCase ( self : str , a_ : float = 0.0_1 ):
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
lowerCamelCase__ = [] # x coordinates of points to plot
lowerCamelCase__ = [] # y coordinates of points to plot
lowerCamelCase__ = 0.0
while t <= 1:
lowerCamelCase__ = self.bezier_curve_function(a_ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowerCamelCase__ = [i[0] for i in self.list_of_points]
lowerCamelCase__ = [i[1] for i in self.list_of_points]
plt.plot(
a_ , a_ , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(a_ , a_ , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 235 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = StableDiffusionLatentUpscalePipeline
__lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
__lowerCAmelCase : str = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
__lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowerCAmelCase : int = frozenset([] )
__lowerCAmelCase : Union[str, Any] = True
@property
def __lowerCamelCase ( self :int ):
snake_case__ : str = 1
snake_case__ : str = 4
snake_case__ : Any = (1_6, 1_6)
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__lowercase )
return image
def __lowerCamelCase ( self :Any ):
torch.manual_seed(0 )
snake_case__ : str = UNetaDConditionModel(
act_fn='''gelu''' ,attention_head_dim=8 ,norm_num_groups=__lowercase ,block_out_channels=[3_2, 3_2, 6_4, 6_4] ,time_cond_proj_dim=1_6_0 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=3_2 ,down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) ,in_channels=8 ,mid_block_type=__lowercase ,only_cross_attention=__lowercase ,out_channels=5 ,resnet_time_scale_shift='''scale_shift''' ,time_embedding_type='''fourier''' ,timestep_post_act='''gelu''' ,up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') ,)
snake_case__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
snake_case__ : Any = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''quick_gelu''' ,projection_dim=5_1_2 ,)
snake_case__ : Any = CLIPTextModel(__lowercase )
snake_case__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ : List[str] = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def __lowerCamelCase ( self :Dict ,__lowercase :Dict ,__lowercase :List[Any]=0 ):
if str(__lowercase ).startswith('''mps''' ):
snake_case__ : Tuple = torch.manual_seed(__lowercase )
else:
snake_case__ : Optional[Any] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
snake_case__ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Tuple = '''cpu'''
snake_case__ : Any = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Dict = self.get_dummy_inputs(__lowercase )
snake_case__ : str = pipe(**__lowercase ).images
snake_case__ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 2_5_6, 2_5_6, 3) )
snake_case__ : Dict = np.array(
[0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
snake_case__ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowercase ,1e-3 )
def __lowerCamelCase ( self :List[Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def __lowerCamelCase ( self :str ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def __lowerCamelCase ( self :str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __lowerCamelCase ( self :Tuple ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def __lowerCamelCase ( self :int ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def __lowerCamelCase ( self :Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def __lowerCamelCase ( self :List[str] ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __lowerCamelCase ( self :int ):
snake_case__ : Dict = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : Tuple = self.pipeline_class(**__lowercase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Dict = self.get_dummy_inputs(__lowercase )
snake_case__ : str = 2
snake_case__ : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ : Any = getattr(__lowercase ,scheduler_enum.name )
snake_case__ : Any = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ : List[Any] = pipe(**__lowercase )[0]
outputs.append(__lowercase )
assert check_same_shape(__lowercase )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = torch.manual_seed(3_3 )
snake_case__ : Any = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ,torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ : Any = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' ,torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ : Union[str, Any] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ : List[Any] = pipe(__lowercase ,generator=__lowercase ,output_type='''latent''' ).images
snake_case__ : Tuple = upscaler(
prompt=__lowercase ,image=__lowercase ,num_inference_steps=2_0 ,guidance_scale=0 ,generator=__lowercase ,output_type='''np''' ,).images[0]
snake_case__ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : int = torch.manual_seed(3_3 )
snake_case__ : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' ,torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ : Union[str, Any] = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ : str = upscaler(
prompt=__lowercase ,image=__lowercase ,num_inference_steps=2_0 ,guidance_scale=0 ,generator=__lowercase ,output_type='''np''' ,).images[0]
snake_case__ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 252 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : Any = """data2vec-vision"""
def __init__( self :int ,__lowercase :Optional[int]=7_6_8 ,__lowercase :Any=1_2 ,__lowercase :Optional[Any]=1_2 ,__lowercase :List[Any]=3_0_7_2 ,__lowercase :str="gelu" ,__lowercase :Optional[int]=0.0 ,__lowercase :Optional[Any]=0.0 ,__lowercase :List[str]=0.02 ,__lowercase :Tuple=1e-1_2 ,__lowercase :Dict=2_2_4 ,__lowercase :Union[str, Any]=1_6 ,__lowercase :List[str]=3 ,__lowercase :Tuple=False ,__lowercase :Tuple=False ,__lowercase :Optional[int]=False ,__lowercase :Tuple=False ,__lowercase :List[Any]=0.1 ,__lowercase :Dict=0.1 ,__lowercase :str=True ,__lowercase :Dict=[3, 5, 7, 1_1] ,__lowercase :Dict=[1, 2, 3, 6] ,__lowercase :List[str]=True ,__lowercase :Tuple=0.4 ,__lowercase :str=2_5_6 ,__lowercase :Optional[Any]=1 ,__lowercase :Tuple=False ,__lowercase :int=2_5_5 ,**__lowercase :Optional[int] ,):
super().__init__(**__lowercase )
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : str = layer_norm_eps
snake_case__ : Union[str, Any] = image_size
snake_case__ : int = patch_size
snake_case__ : Optional[int] = num_channels
snake_case__ : str = use_mask_token
snake_case__ : Union[str, Any] = use_absolute_position_embeddings
snake_case__ : List[str] = use_relative_position_bias
snake_case__ : List[str] = use_shared_relative_position_bias
snake_case__ : Optional[int] = layer_scale_init_value
snake_case__ : Tuple = drop_path_rate
snake_case__ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ : Dict = out_indices
snake_case__ : Any = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ : Union[str, Any] = use_auxiliary_head
snake_case__ : Optional[Any] = auxiliary_loss_weight
snake_case__ : Dict = auxiliary_channels
snake_case__ : Any = auxiliary_num_convs
snake_case__ : Any = auxiliary_concat_input
snake_case__ : Dict = semantic_loss_ignore_index
class a ( __lowerCamelCase ):
__lowerCAmelCase : Optional[Any] = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self :str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self :Dict ):
return 1e-4
| 252 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : Dict ):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ ( lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE : int = TextDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_text_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : str = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Any = {'''text''': '''string'''}
__SCREAMING_SNAKE_CASE : Tuple = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE : int = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE : List[str] = TextDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_text_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ ( lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Any = {'''text''': '''string'''}
__SCREAMING_SNAKE_CASE : Optional[int] = TextDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_text_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_ ( lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[Any] ):
'''simple docstring'''
if issubclass(lowercase_ , lowercase_ ):
__SCREAMING_SNAKE_CASE : str = text_path
elif issubclass(lowercase_ , lowercase_ ):
__SCREAMING_SNAKE_CASE : Any = [text_path]
__SCREAMING_SNAKE_CASE : List[Any] = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : List[Any] = {'''text''': '''string'''}
__SCREAMING_SNAKE_CASE : List[Any] = TextDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_text_dataset(lowercase_ , lowercase_ )
def lowerCAmelCase_ ( lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=("train",) ):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
for split in splits:
__SCREAMING_SNAKE_CASE : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ ( lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Optional[int] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE : Optional[Any] = TextDatasetReader({'''train''': text_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_text_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowerCAmelCase_ ( lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
__SCREAMING_SNAKE_CASE : str = {'''text''': '''string'''}
__SCREAMING_SNAKE_CASE : Optional[Any] = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE : Dict = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE : List[Any] = TextDatasetReader({'''train''': text_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_text_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] ):
'''simple docstring'''
if split:
__SCREAMING_SNAKE_CASE : Tuple = {split: text_path}
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''train'''
__SCREAMING_SNAKE_CASE : List[str] = {'''train''': text_path, '''test''': text_path}
__SCREAMING_SNAKE_CASE : Any = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : List[str] = {'''text''': '''string'''}
__SCREAMING_SNAKE_CASE : Optional[Any] = TextDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_text_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 401 |
"""simple docstring"""
def lowerCAmelCase_ ( lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : Optional[int] ):
'''simple docstring'''
if index == r:
for j in range(lowercase_ ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
__SCREAMING_SNAKE_CASE : str = arr[i]
combination_util(lowercase_ , lowercase_ , lowercase_ , index + 1 , lowercase_ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase_ , lowercase_ , lowercase_ , 0 , lowercase_ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_lowerCamelCase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 401 | 1 |
'''Tests for RagRetriever over canonical, custom, and legacy index backends.'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
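# The fixtures in this TestCase build throwaway RagRetriever instances over a
# two-document corpus, covering the canonical HF-datasets index, a custom index
# (in memory or loaded from disk), and the legacy pickled index format.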
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok: write a tiny WordPiece vocab for the question/context encoders.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok: write a minimal byte-level BPE vocab and merges for the generator.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
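    # Tokenizer helpers: reload the tiny tokenizers that setUp wrote to disk.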
def UpperCAmelCase_ ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever( self ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever( self , from_disk ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , '''dataset''' )
            config.index_path = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
return retriever
    def get_dummy_legacy_index_retriever( self ):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        passages_file = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        passages = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(passages , open(passages_file , '''wb''' ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , n_docs )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
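        # Ranking rationale: with retrieval_vector_size d = 8, the all-ones
        # query scores 2*d against doc "1" (embedding 2*ones) but only d
        # against doc "0", so doc "1" ranks first; the -ones query flips the
        # sign of both scores, which is why doc "0" wins the second row.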
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase_ = self.get_dummy_dataset()
retriever.save_pretrained(_snake_case )
lowerCAmelCase_ = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase_ = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase_ = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_legacy_index_retriever()
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase_ = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
import torch
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase_ = [[5, 7], [10, 11]]
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(_snake_case , np.ndarray )
lowerCAmelCase_ = retriever(
_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case , return_tensors='''pt''' , )
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_snake_case , torch.Tensor )
self.assertIsInstance(_snake_case , torch.Tensor )
self.assertIsInstance(_snake_case , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_dpr_ctx_encoder_tokenizer()
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
retriever.set_ctx_encoder_tokenizer(_snake_case )
lowerCAmelCase_ = [[5, 7], [10, 11]]
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase_ = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
self.assertEqual(
len(_snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _snake_case ) # check for doc token related keys in dictionary.
| 274 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast ( BertTokenizerFast ):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
pass
| 316 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints , dump_path ):
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(checkpoints , torch.device("cpu" ) , config )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-." )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-." )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
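    # Example invocation (script name and paths are placeholders):
    #   python convert_bertabs_original_checkpoint.py \
    #       --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
    #       --pytorch_dump_folder_path ./bertabs-converted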
| 592 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
_UpperCAmelCase ="ylacombe/bark-small"
_UpperCAmelCase =tempfile.mkdtemp()
_UpperCAmelCase ="en_speaker_1"
_UpperCAmelCase ="This is a test string"
_UpperCAmelCase ="speaker_embeddings_path.json"
_UpperCAmelCase ="speaker_embeddings"
    def get_tokenizer( self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE ( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
# test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(file_path , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=file_path )
        processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_UpperCAmelCase =processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE ( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="max_length" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 592 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    '''Loads a vocabulary file and an emoji file into dictionaries.'''
    with open(emoji_file , '''r''' , encoding='''utf-8''' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as f:
        token = f.readlines()
    token = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[''','''.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
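# Illustration (an assumption about the expected layout, not the released
# asset): each line of vocab.txt is either a single token or a comma-separated
# group of surface variants that share one id, e.g.
#
#   あした
#   スマホ,スマートフォン
#
# Both "スマホ" and "スマートフォン" then map to the same id in `vocab`, while
# `raw_vocab` keeps one joined entry per line and `ids_to_tokens` the list form.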
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''''''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(''','''.join(token ) + '''\n''' )
                index += 1
        with open(emoji_file , '''w''' , encoding='''utf-8''' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        self.content_repatter2 = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        self.content_repatter3 = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        self.content_repatter4 = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter5 = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter6 = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self : List[str] ):
return len(self.ids_to_tokens )
    def clean_text( self , content : str ):
        content = self.content_repatter1.sub('''<URL>''' , content )
        content = self.content_repatter2.sub('''<EMAIL>''' , content )
        content = self.content_repatter3.sub('''<TEL>''' , content )
        content = self.content_repatter4.sub('''<DATE>''' , content )
        content = self.content_repatter5.sub('''<DATE>''' , content )
        content = self.content_repatter6.sub('''<PRICE>''' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(''' ''' , '''<SP>''' )
        text = text.replace('''\u3000''' , '''<SP>''' )
        text = text.replace('''\r\n''' , '''<BR>''' )
        text = text.replace('''\n''' , '''<BR>''' )
        text = text.replace('''\r''' , '''<BR>''' )
        text = text.replace('''\t''' , '''<TAB>''' )
        text = text.replace('''—''' , '''ー''' )
        text = text.replace('''−''' , '''ー''' )
        for k, v in self.emoji['''emoji'''].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE28080 and c <= 0XE2B07F:
                    return True
            return False
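        # Illustration of check_simbol's arithmetic (a sketch): a two-byte
        # UTF-8 character such as "¶" (U+00B6) encodes to b"\xc2\xb6", so
        # c = (0xC2 << 8) + 0xB6 = 0xC2B6; that 16-bit value is what the
        # range tests above use to classify symbol-like characters.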
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('''<KIGOU>''' )
                elif checkuae(wd ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline='''\n''' ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('''utf-8''' , errors='''replace''' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['''emoji_inv'''][word] )
            elif word == "<SP>":
                words.append(''' ''' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('''\t''' )
            elif word == "<BLOCK>":
                words.append('''▀''' )
            elif word == "<KIGOU>":
                words.append('''ǀ''' )
            elif word == "<U2000U2BFF>":
                words.append('''‖''' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('''utf-8''' , errors='''replace''' ) )
        text = ''''''.join(words )
        return text
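# Minimal smoke-test sketch for load_vocab_and_emoji (the tiny vocab/emoji
# files written here are illustrative assumptions, not the released assets):
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp , '''vocab.txt''' )
        emoji_path = os.path.join(tmp , '''emoji.json''' )
        with open(vocab_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''こんにちは\nスマホ,スマートフォン\n<|endoftext|>\n''' )
        with open(emoji_path , '''w''' , encoding='''utf-8''' ) as f:
            json.dump({'''emoji''': {}, '''emoji_inv''': {}} , f )
        vocab, raw_vocab, ids_to_tokens, emoji = load_vocab_and_emoji(vocab_path , emoji_path )
        print(raw_vocab )  # {'こんにちは': 0, 'スマホ,スマートフォン': 1, '<|endoftext|>': 2}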
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample :
    guid : str
    words : List[str]
    labels : Optional[List[str]]
@dataclass
class InputFeatures :
    input_ids : List[int]
    attention_mask : List[int]
    token_type_ids : Optional[List[int]] = None
    label_ids : Optional[List[int]] = None
class Split ( Enum ):
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class TokenClassificationTask :
@staticmethod
    def read_examples_from_file( data_dir , mode: Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
    def get_labels( path: str ) -> List[str]:
raise NotImplementedError
@staticmethod
    def convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_seq_length : int , tokenizer : PreTrainedTokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ):
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 1_00_00 == 0:
                logger.info('''Writing example %d of %d''' , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
features.append(
InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
return features
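# Worked example of the alignment rule implemented above (tokens are
# hypothetical): for words = ["running", "."] with labels = ["B-V", "O"] and a
# tokenizer that splits "running" into ["runn", "##ing"], the emitted
# label_ids are [label_map["B-V"], pad_token_label_id, label_map["O"]]: only
# the first sub-token of each word keeps the real label, so the loss ignores
# the continuation pieces.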
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset ( Dataset ):
    features : List[InputFeatures]
    pad_token_label_id : int = nn.CrossEntropyLoss().ignore_index
    def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                examples = token_classification_task.read_examples_from_file(data_dir , mode )
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
    def __getitem__( self , i ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset :
    features : List[InputFeatures]
    pad_token_label_id : int = -100
    def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
        examples = token_classification_task.read_examples_from_file(data_dir , mode )
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
    def get_dataset( self ):
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
def __len__( self : Dict ):
return len(self.features )
    def __getitem__( self , i ):
return self.features[i]
| 35 | 1 |
"""simple docstring"""
def ugly_numbers( n : int ) -> int:
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 706 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
"""simple docstring"""
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
        self.parent.assertTrue(hasattr(config , """num_encoder_blocks""" ) )
class SegformerModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 128] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ = len(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Dict ):
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase ):
continue
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
A__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
A__ = model(**UpperCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def _snake_case ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class SegformerModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
A__ = model(UpperCamelCase )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
A__ = model(UpperCamelCase )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-1 ) )
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
A__ = model(UpperCamelCase )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase , target_sizes=[(5_00, 3_00)] )
A__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , UpperCamelCase )
A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase )
A__ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , UpperCamelCase )
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using floating-point math."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using exact integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
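    # Hypothetical usage sketch (not in the original file): the two checks agree
    # on small inputs, but only the binary-search variant stays exact for huge
    # integers such as (2**53 + 1) ** 2, where math.sqrt() rounds.
    for value in (16, 26, (2**53 + 1) ** 2):
        print(value, perfect_square(value), perfect_square_binary_search(value))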
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """
    Compute the Z-function of a string in O(n): for each index i > 0, z[i] is
    the length of the longest substring starting at i that is also a prefix of
    the whole string. See https://cp-algorithms.com/string/z-function.html
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if the match at index i can be extended by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count the occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
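    # Hypothetical usage sketch (not in the original file): z_function exposes
    # prefix matches, and find_pattern counts pattern occurrences with it.
    print(z_function("abracadabra"))           # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    print(find_pattern("abr", "abracadabra"))  # 2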
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if `use_pascal` is set)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
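    # Hypothetical usage sketch (not in the original file).
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString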
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def snake_case ( self : List[str] )-> str:
lowerCamelCase__ : Dict =32
lowerCamelCase__ : Optional[Any] =embedder_hidden_size
# image encoding components
lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
lowerCamelCase__ : Dict =UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =AutoencoderKL()
lowerCamelCase__ : int ={
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
if str(lowerCamelCase ).startswith('''mps''' ):
lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
lowerCamelCase__ : int =input_image * 0.5 + 0.5
lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def snake_case ( self : List[str] )-> Optional[Any]:
lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
inputs.update({'''image_embeds''': None} )
lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def snake_case ( self : int )-> Optional[Any]:
lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def snake_case ( self : List[str] )-> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : List[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Tuple:
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : Tuple =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : List[Any] =pipe(
lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
'''simple docstring'''
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Object detection pipeline using any model that supports zero-shot object detection (e.g. OWL-ViT)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image"], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a tensor [xmin, ymin, xmax, ymax] into a dict {"xmin": xmin, ...}."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
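# Hypothetical usage sketch (not in the original file), via the high-level
# `pipeline` factory with an OWL-ViT checkpoint:
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detections = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]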
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into
    a single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
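# Hypothetical usage sketch (not in the original file):
#
#   from transformers import ChineseCLIPProcessor
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   batch = processor(text=["一只猫"], images=image, return_tensors="pt")
#   # `batch` carries input_ids / attention_mask from the tokenizer plus
#   # pixel_values from the image processor.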
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
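    # Hypothetical usage sketch (not in the original file): print one open tour
    # on a 5x5 board; each cell holds the step at which the knight visits it.
    for row in open_knight_tour(5):
        print(row)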
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __a ( _UpperCAmelCase ):
"""simple docstring"""
_A : Any = "blenderbot-small"
_A : Dict = ["past_key_values"]
_A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple ,_UpperCamelCase : Optional[int]=5_0_2_6_5 ,_UpperCamelCase : List[Any]=5_1_2 ,_UpperCamelCase : Union[str, Any]=8 ,_UpperCamelCase : List[Any]=2_0_4_8 ,_UpperCamelCase : List[str]=1_6 ,_UpperCamelCase : Union[str, Any]=8 ,_UpperCamelCase : Union[str, Any]=2_0_4_8 ,_UpperCamelCase : Dict=1_6 ,_UpperCamelCase : Dict=0.0 ,_UpperCamelCase : Tuple=0.0 ,_UpperCamelCase : Tuple=True ,_UpperCamelCase : Dict=True ,_UpperCamelCase : Union[str, Any]="gelu" ,_UpperCamelCase : Optional[Any]=5_1_2 ,_UpperCamelCase : Tuple=0.1 ,_UpperCamelCase : Optional[int]=0.0 ,_UpperCamelCase : List[Any]=0.0 ,_UpperCamelCase : Optional[int]=0.02 ,_UpperCamelCase : Tuple=1 ,_UpperCamelCase : int=False ,_UpperCamelCase : Optional[Any]=0 ,_UpperCamelCase : Dict=1 ,_UpperCamelCase : Optional[int]=2 ,_UpperCamelCase : Union[str, Any]=2 ,**_UpperCamelCase : List[str] ,) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =vocab_size
SCREAMING_SNAKE_CASE__ =max_position_embeddings
SCREAMING_SNAKE_CASE__ =d_model
SCREAMING_SNAKE_CASE__ =encoder_ffn_dim
SCREAMING_SNAKE_CASE__ =encoder_layers
SCREAMING_SNAKE_CASE__ =encoder_attention_heads
SCREAMING_SNAKE_CASE__ =decoder_ffn_dim
SCREAMING_SNAKE_CASE__ =decoder_layers
SCREAMING_SNAKE_CASE__ =decoder_attention_heads
SCREAMING_SNAKE_CASE__ =dropout
SCREAMING_SNAKE_CASE__ =attention_dropout
SCREAMING_SNAKE_CASE__ =activation_dropout
SCREAMING_SNAKE_CASE__ =activation_function
SCREAMING_SNAKE_CASE__ =init_std
SCREAMING_SNAKE_CASE__ =encoder_layerdrop
SCREAMING_SNAKE_CASE__ =decoder_layerdrop
SCREAMING_SNAKE_CASE__ =use_cache
SCREAMING_SNAKE_CASE__ =encoder_layers
SCREAMING_SNAKE_CASE__ =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,is_encoder_decoder=_lowerCAmelCase ,decoder_start_token_id=_lowerCAmelCase ,forced_eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,)
class __a ( _UpperCAmelCase ):
"""simple docstring"""
@property
def __A ( self : Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ ={0: """batch"""}
SCREAMING_SNAKE_CASE__ ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ ={0: """batch""", 1: """decoder_sequence"""}
SCREAMING_SNAKE_CASE__ ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =self.num_layers
for i in range(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ ={0: """batch""", 2: """past_sequence + sequence"""}
SCREAMING_SNAKE_CASE__ ={0: """batch""", 2: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ =super().outputs
else:
SCREAMING_SNAKE_CASE__ =super(_lowerCAmelCase ,self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =self.num_layers
for i in range(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ ={0: """batch""", 2: """past_sequence + sequence"""}
SCREAMING_SNAKE_CASE__ ={0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __A ( self : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : List[str] = -1 ,_UpperCamelCase : Any = -1 ,_UpperCamelCase : Tuple = False ,_UpperCamelCase : Any = None ,) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ =seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ =dict(**_lowerCAmelCase ,**_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =common_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ =common_inputs["""decoder_input_ids"""].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =self.num_attention_heads
SCREAMING_SNAKE_CASE__ =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ =decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ =torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase )] ,dim=1 )
SCREAMING_SNAKE_CASE__ =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =self.num_layers
SCREAMING_SNAKE_CASE__ =min(_lowerCAmelCase ,_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ =max(_lowerCAmelCase ,_lowerCAmelCase ) - min_num_layers
SCREAMING_SNAKE_CASE__ ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ =encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCAmelCase ,_lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def __A ( self : Dict ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : List[Any] = -1 ,_UpperCamelCase : List[str] = -1 ,_UpperCamelCase : Optional[int] = False ,_UpperCamelCase : Optional[Any] = None ,) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ =seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =self.num_attention_heads
SCREAMING_SNAKE_CASE__ =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ =common_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE__ =torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 )
SCREAMING_SNAKE_CASE__ =[
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def __A ( self : List[str] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : str = -1 ,_UpperCamelCase : Optional[int] = -1 ,_UpperCamelCase : Any = False ,_UpperCamelCase : Optional[int] = None ,) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =compute_effective_axis_dimension(
_lowerCAmelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ =tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ =compute_effective_axis_dimension(
_lowerCAmelCase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ =dict(tokenizer(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ) )
return common_inputs
def __A ( self : List[str] ,_UpperCamelCase : Dict ,_UpperCamelCase : Tuple = -1 ,_UpperCamelCase : str = -1 ,_UpperCamelCase : Tuple = False ,_UpperCamelCase : Dict = None ,) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE__ =self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
return common_inputs
def __A ( self : Tuple ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : str ,_UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ =super()._flatten_past_key_values_(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ =super(_lowerCAmelCase ,self )._flatten_past_key_values_(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
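# Hypothetical export sketch (not in the original file): an OnnxSeq2SeqConfigWithPast
# subclass like the one above is what the legacy `transformers.onnx` exporter
# resolves for BlenderbotSmall checkpoints, e.g.
#
#   python -m transformers.onnx --model=facebook/blenderbot_small-90M --feature=seq2seq-lm onnx_out/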
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
lowercase = model.config
lowercase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
if "encoder.model" in name:
lowercase = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase = """encoder.""" + name
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = int(key_split[5] )
lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : str=None , lowercase_ : Optional[Any]=False ):
# load original model
lowercase = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
lowercase , lowercase = get_configs(lowercase_ )
lowercase = DonutSwinModel(lowercase_ )
lowercase = MBartForCausalLM(lowercase_ )
lowercase = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
lowercase = original_model.state_dict()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
lowercase = load_dataset("""hf-internal-testing/example-documents""" )
lowercase = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
lowercase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase = DonutProcessor(lowercase_ , lowercase_ )
lowercase = processor(lowercase_ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase = """When is the coffee break?"""
lowercase = task_prompt.replace("""{user_input}""" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase = original_model.encoder.model.patch_embed(lowercase_ )
lowercase , lowercase = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
lowercase = original_model.encoder(lowercase_ )
lowercase = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
lowercase = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
lowercase = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowercase_ : Dict = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
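# Hypothetical invocation sketch (not in the original file):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa \
#       --push_to_hub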
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration class to store the configuration of an X-MOD model."""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
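# Hypothetical usage sketch (not in the original file): X-MOD keeps one adapter
# per language and routes inputs by the configured default language.
#
#   from transformers import XmodConfig, XmodModel
#
#   config = XmodConfig(languages=["en_XX", "de_DE"], default_language="en_XX")
#   model = XmodModel(config)
#   model.set_default_language("de_DE")  # switch adapters at inference time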
"""
Ideal gas law: PV = nRT, relating the pressure, volume, amount and temperature
of an ideal gas.
"""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure (Pa) of `moles` mol of gas at `kelvin` K in `volume` m^3."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume (m^3) of `moles` mol of gas at `kelvin` K under `pressure` Pa."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
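    # Hypothetical usage sketch (not in the original file): pressure (Pa) of
    # 2 mol of an ideal gas at 300 K confined to 0.02 m^3, via PV = nRT.
    print(pressure_of_gas_system(2, 300, 0.02))  # ~249433.86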
"""simple docstring"""
def fibonacci(n: int) -> int:
    """Compute the n-th Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
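    # Worked example (not in the original file): F(12) = 144 is the first
    # Fibonacci number with three digits.
    assert solution(3) == 12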
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : str , lowercase_ : List[str]=13 , lowercase_ : int=30 , lowercase_ : Optional[int]=2 , lowercase_ : List[str]=3 , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[int]=32 , lowercase_ : Tuple=5 , lowercase_ : str=4 , lowercase_ : Optional[int]=37 , lowercase_ : Tuple="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE_ : int = patch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_labels
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : Optional[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : int = num_patches + 1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = ViTModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ViTForMaskedImageModeling(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : List[str] = ViTForMaskedImageModeling(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : int = model(lowercase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> List[Any]:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''').to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_interpolate_pos_encoding( self ):
        '''simple docstring'''
        model = ViTModel.from_pretrained('''facebook/dino-vits8''').to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''')
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ):
        '''simple docstring'''
        model = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.float16 , device_map='''auto''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''')
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 512 | 1 |
"""simple docstring"""
def sum_of_digits(_lowerCAmelCase : int ):
    '''simple docstring'''
    n = abs(_lowerCAmelCase )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(_lowerCAmelCase : int ):
    '''simple docstring'''
    n = abs(_lowerCAmelCase )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )


def sum_of_digits_compact(_lowerCAmelCase : int ):
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(_lowerCAmelCase ) ) )


def benchmark():
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='import __main__' )
        print(f"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
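# --- Added usage sketch (not part of the original module) ---------------------
# All three variants above should agree on any integer, including negatives;
# `check_digit_sum_variants` is a hypothetical helper added only for illustration.
def check_digit_sum_variants(value: int) -> bool:
    return sum_of_digits(value) == sum_of_digits_recursion(value) == sum_of_digits_compact(value)


assert sum_of_digits(9045) == 18  # 9 + 0 + 4 + 5
assert check_digit_sum_variants(-123)  # abs() makes the sign irrelevant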
| 700 | """simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(_lowerCAmelCase : dict ):
    '''simple docstring'''
    return (_lowerCAmelCase["data"], _lowerCAmelCase["target"])


def xgboost(features : np.ndarray , target : np.ndarray , test_features : np.ndarray ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions


def main():
    '''simple docstring'''
    housing = fetch_california_housing()
    data, target = data_handling(housing )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.2_5 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Squared Error : {mean_squared_error(y_test , predictions )}""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
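# --- Added sketch (not part of the original script) ---------------------------
# The script reports MAE and MSE; an RMSE in the same units as the target is one
# step further. `rmse` is a hypothetical helper added only for illustration.
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    # square root of the mean squared error printed by main() above
    return float(mean_squared_error(y_true, y_pred) ** 0.5)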
| 645 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __magic_name__ ( unittest.TestCase ):
    def get_file_format( self , seed , shape )-> Optional[Any]:
        return F"gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"
    def tearDown( self )-> Optional[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents( self , seed=0 , shape=(4, 4, 64, 64) , fpaa=False )-> Optional[Any]:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self , fpaa=False , model_id="CompVis/stable-diffusion-v1-4" )-> Optional[int]:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = "bf16" if fpaa else None
        model , params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder="unet" , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self , seed=0 , shape=(4, 77, 768) , fpaa=False )-> Optional[int]:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1_000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice )-> List[Any]:
        model , params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten() ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1_000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice )-> Optional[Any]:
        model , params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1_024) , fpaa=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten() ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
| 628 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **_lowercase )-> str:
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**_lowercase )
        return config
    def test_timesteps( self )-> Union[str, Any]:
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self )-> int:
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self )-> str:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self )-> Any:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction( self )-> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 0.0_002 ) < 1e-3
    def test_full_loop_no_noise( self )-> Dict:
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
    def test_full_loop_device( self )-> Optional[int]:
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("cpu" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 628 | 1 |
"""simple docstring"""
from collections import deque
class Process:
    """simple docstring"""

    def __init__( self , process_name : str , arrival_time : int , burst_time : int ) -> None:
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """simple docstring"""

    def __init__( self , number_of_queues : int , time_slices : list[int] , queue : deque[Process] , current_time : int , ) -> None:
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self ) -> list[str]:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue : list[Process] ) -> list[int]:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue : list[Process] ) -> list[int]:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue : list[Process] ) -> list[int]:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue : deque[Process] ) -> list[int]:
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process : Process ) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue : deque[Process] ) -> deque[Process]:
        """simple docstring"""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue : deque[Process] , time_slice : int ) -> tuple[deque[Process], deque[Process]]:
        """simple docstring"""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ) -> deque[Process]:
        """simple docstring"""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process('''P1''', 0, 53)
    P2 = Process('''P2''', 0, 17)
    P3 = Process('''P3''', 0, 68)
    P4 = Process('''P4''', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([P1, P2, P3, P4])})
    P1 = Process('''P1''', 0, 53)
    P2 = Process('''P2''', 0, 17)
    P3 = Process('''P3''', 0, 68)
    P4 = Process('''P4''', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
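# --- Added cross-check sketch (not part of the original module) ---------------
# With a single queue and no time slices, the MLFQ degenerates to pure FCFS, so
# the waiting times of P1(53), P2(17), P3(68), P4(24), all arriving at t=0, can
# be verified by hand: 0, 53, 70 and 138. `fcfs_only_demo` is a hypothetical
# helper added only for illustration.
def fcfs_only_demo() -> None:
    procs = [Process("P1", 0, 53), Process("P2", 0, 17), Process("P3", 0, 68), Process("P4", 0, 24)]
    fcfs = MLFQ(1, [], deque(procs), 0)
    fcfs.multi_level_feedback_queue()  # with one queue, only the FCFS stage runs
    assert MLFQ.calculate_waiting_time(fcfs, procs) == [0, 53, 70, 138]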
| 702 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *_snake_case : int , **_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
pass
def hashimage(image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]


def mask_to_test_readable(mask ):
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
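# --- Added sanity sketch for the helpers above (illustration only) ------------
# A binary PIL mask of width 640 and height 480 summarizes to a 10-character
# hash plus its numpy shape, which keeps the expected outputs below diffable.
# `_helpers_smoke_test` is hypothetical and assumes PIL is available.
def _helpers_smoke_test() -> None:
    sample_mask = Image.new("1", (640, 480))
    summary = mask_to_test_readable(sample_mask)
    assert summary["shape"] == (480, 640)  # np.array of a PIL image is (height, width)
    assert len(summary["hash"]) == 10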
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , mask_generator , examples ):
        """simple docstring"""
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation" , model=model_id )
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
] , )
| 482 | 0 |
def solution(length = 50 ):
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
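# --- Added brute-force cross-check (not part of the original file) ------------
# The DP above appears to count, per tile size k in {2, 3, 4}, the rows of the
# given length built from unit squares plus k-length tiles, excluding the
# all-square row. That count obeys h(n) = h(n - 1) + h(n - k) with h(0) = 1, so
# a small recursive tally should match. `brute_force` is a hypothetical helper
# added only for illustration.
def brute_force(length: int) -> int:
    def h(n: int, k: int) -> int:
        if n < 0:
            return 0
        if n == 0:
            return 1
        return h(n - 1, k) + h(n - k, k)

    return sum(h(length, k) - 1 for k in (2, 3, 4))


assert brute_force(5) == solution(5) == 12  # 7 + 3 + 2, verifiable by hand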
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
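# Design note (added commentary, not part of the original file): the lazy-module
# pattern above replaces the package module with a _LazyModule proxy, so the
# heavy torch-dependent imports listed in _import_structure only run when an
# attribute such as RoCBertModel is first accessed, keeping `import transformers`
# cheap even when all optional dependencies are installed.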
| 457 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_000 , return_attention_mask=False , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : Optional[Any] ):
return list(itertools.chain(*UpperCamelCase__ ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="""max_length""" , return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_double_precision_pad( self ):
        '''simple docstring'''
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
                0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
                0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
                -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3_000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
| 712 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
    mask_patch_size: int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    mask_ratio: float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__( self ):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''validation'''] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name_or_path: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    image_size: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    patch_size: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None , metadata={'help': 'Stride to use for the encoder.'} , )
class MaskGenerator:
    def __init__( self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        '''simple docstring'''
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self ):
        '''simple docstring'''
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
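# --- Added shape sanity sketch (illustration only, not part of the script) ----
# With the defaults (input 192, mask patches of 32, model patches of 4) the
# flattened mask covers (192 // 4) ** 2 = 2304 model-patch positions, and
# ceil(36 * 0.6) = 22 of the 36 mask patches are set, each spanning 8 x 8 of
# them. `_mask_generator_demo` is a hypothetical helper.
def _mask_generator_demo() -> None:
    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()
    assert mask.shape == (2304,)
    assert int(mask.sum()) == 22 * 8 * 8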
def collate_fn(examples ):
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
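# Shape sketch for the collator above (added commentary; the 192 and 2304 below
# assume the MaskGenerator defaults): for a batch of B examples, pixel_values is
# (B, 3, 192, 192) after the transforms and bool_masked_pos is (B, 2304), one
# flag per model patch.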
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , """decoder_type""" ):
        config.decoder_type = """simmim"""
# adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        examples["""mask"""] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 650 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = DebertaVaTokenizer
__UpperCAmelCase : Dict = DebertaVaTokenizerFast
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Any = True
def __lowercase ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_a : Dict = DebertaVaTokenizer(_a ,unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : List[str] = 'this is a test'
_a : List[str] = 'this is a test'
return input_text, output_text
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = '<pad>'
_a : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'<unk>' )
self.assertEqual(vocab_keys[-1] ,'[PAD]' )
self.assertEqual(len(_a ) ,3_0001 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,3_0000 )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = ' \tHeLLo!how \n Are yoU? '
_a : Union[str, Any] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_a : Union[str, Any] = DebertaVaTokenizer(_a ,do_lower_case=_a )
_a : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : List[Any] = DebertaVaTokenizerFast(_a ,do_lower_case=_a )
_a : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = 'I was born in 92000, and this is falsé.'
_a : Tuple = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_a : List[str] = DebertaVaTokenizer(_a ,split_by_punct=_a )
_a : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : List[str] = DebertaVaTokenizerFast(_a ,split_by_punct=_a )
_a : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = 'I was born in 92000, and this is falsé.'
_a : Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_a : Dict = DebertaVaTokenizer(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : str = DebertaVaTokenizerFast(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Union[str, Any] = 'I was born in 92000, and this is falsé.'
_a : Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_a : Union[str, Any] = DebertaVaTokenizer(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : List[str] = DebertaVaTokenizerFast(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[Any] = 'I was born in 92000, and this is falsé.'
_a : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_a : Any = DebertaVaTokenizer(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : Optional[Any] = DebertaVaTokenizerFast(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[int] = ' \tHeLLo!how \n Are yoU? '
_a : Dict = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
_a : Dict = DebertaVaTokenizer(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : List[Any] = DebertaVaTokenizerFast(_a ,do_lower_case=_a ,split_by_punct=_a )
_a : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Union[str, Any] = self.get_tokenizer()
_a : Any = self.get_rust_tokenizer()
_a : Optional[Any] = 'I was born in 92000, and this is falsé.'
_a : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a ,add_special_tokens=_a ) )
_a : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a ,add_special_tokens=_a ) )
self.assertListEqual(_a ,_a )
_a : Any = tokenizer.encode(_a ,add_special_tokens=_a )
_a : Tuple = rust_tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : str = self.get_rust_tokenizer()
_a : str = tokenizer.encode(_a )
_a : Union[str, Any] = rust_tokenizer.encode(_a )
self.assertListEqual(_a ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = 'This is a test'
_a : Any = [13, 1, 4398, 25, 21, 1289]
_a : Optional[int] = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
_a : Tuple = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
_a : Union[str, Any] = DebertaVaTokenizer(_a ,keep_accents=_a )
_a : int = DebertaVaTokenizerFast(_a ,keep_accents=_a )
_a : Optional[Any] = tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : str = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : Any = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = rust_tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : int = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : Optional[int] = rust_tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a ,_a )
# fmt: off
_a : Optional[int] = 'I was born in 92000, and this is falsé.'
_a : Tuple = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_a : Optional[Any] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
_a : List[str] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_a : int = tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : int = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a ,_a )
_a : str = rust_tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : Union[str, Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : Any = rust_tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a ,_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = DebertaVaTokenizer(_a )
_a : List[str] = tokenizer.encode('sequence builders' )
_a : str = tokenizer.encode('multi-sequence build' )
_a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_a )
_a : Optional[int] = tokenizer.build_inputs_with_special_tokens(_a ,_a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_a ,)
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name='microsoft/deberta-v2-xlarge' ,revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' ,)
| 229 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = '''gpt_neo'''
__UpperCAmelCase : Optional[int] = ['''past_key_values''']
__UpperCAmelCase : Optional[int] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] ,_a : Optional[int]=5_0257 ,_a : Tuple=2048 ,_a : Optional[int]=2048 ,_a : Any=24 ,_a : Tuple=[[["global", "local"], 12]] ,_a : Union[str, Any]=16 ,_a : List[Any]=None ,_a : Optional[int]=256 ,_a : Optional[Any]="gelu_new" ,_a : List[Any]=0.0 ,_a : Optional[int]=0.0 ,_a : List[Any]=0.0 ,_a : Union[str, Any]=0.1 ,_a : Optional[Any]=1E-5 ,_a : Optional[Any]=0.02 ,_a : str=True ,_a : Any=5_0256 ,_a : Tuple=5_0256 ,**_a : List[str] ,):
'''simple docstring'''
_a : Dict = vocab_size
_a : Union[str, Any] = max_position_embeddings
_a : List[str] = hidden_size
_a : Optional[Any] = num_layers
_a : Optional[Any] = num_heads
_a : Dict = intermediate_size
_a : Any = window_size
_a : List[str] = activation_function
_a : int = resid_dropout
_a : Tuple = embed_dropout
_a : int = attention_dropout
_a : Dict = classifier_dropout
_a : Tuple = layer_norm_epsilon
_a : List[str] = initializer_range
_a : str = use_cache
_a : List[str] = bos_token_id
_a : Optional[Any] = eos_token_id
_a : Tuple = attention_types
_a : Union[str, Any] = self.expand_attention_types_params(_a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
@staticmethod
def __lowercase ( _a : Dict ):
'''simple docstring'''
_a : Dict = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def UpperCAmelCase_ (__a : str , __a : Optional[int] , __a : Tuple , __a : Dict ):
"""simple docstring"""
import torch
_a : Tuple = input.size()
_a : Union[str, Any] = len(__a )
_a : Union[str, Any] = shape[dimension]
_a : str = torch.arange(0 , __a , __a )
_a : Optional[Any] = torch.div(sizedim - size , __a , rounding_mode='floor' ) + 1
_a : str = torch.arange(__a ) + low_indices[:min_length][:, None]
_a : Optional[Any] = [slice(__a )] * rank
_a : Dict = indices
_a : List[str] = input[s]
_a : Optional[int] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__a )
def UpperCAmelCase_ (__a : str , __a : Optional[int] ):
"""simple docstring"""
import torch
_a : List[str] = torch.arange(1 , __a )
_a : int = torch.remainder(__a , __a )
_a : Tuple = remainders == 0
_a : Optional[Any] = candidates[divisor_indices]
_a : List[Any] = torch.max(__a )
return largest_divisor, torch.div(__a , __a , rounding_mode='floor' )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_a ,direction='inputs' )
_a : Optional[int] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_a : List[str] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self._config.num_heads
def __lowercase ( self : Any ,_a : PreTrainedTokenizer ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional[TensorType] = None ,):
'''simple docstring'''
_a : Dict = super(_a ,self ).generate_dummy_inputs(
_a ,batch_size=_a ,seq_length=_a ,is_pair=_a ,framework=_a )
# We need to order the input in the way they appears in the forward()
_a : Union[str, Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a, _a : Dict = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_a : Any = seqlen + 2
_a : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a : Tuple = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
_a : List[str] = common_inputs['attention_mask']
if self.use_past:
_a : Optional[int] = ordered_inputs['attention_mask'].dtype
_a : Optional[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_a ,_a ,dtype=_a )] ,dim=1 )
return ordered_inputs
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return 13
| 229 | 1 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase_ : List[Any] = []
def __lowercase ( a : list[list[int]] , a : int , a : int ) -> bool:
for i in range(len(a ) ):
if board[row][i] == 1:
return False
for i in range(len(a ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(a , -1 , -1 ) , range(a , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(a , -1 , -1 ) , range(a , len(a ) ) ):
if board[i][j] == 1:
return False
return True
def __lowercase ( a : list[list[int]] , a : int ) -> bool:
if row >= len(a ):
solution.append(a )
printboard(a )
print()
return True
for i in range(len(a ) ):
if is_safe(a , a , a ):
__snake_case : int =1
solve(a , row + 1 )
__snake_case : Any =0
return False
def __lowercase ( a : list[list[int]] ) -> None:
for i in range(len(a ) ):
for j in range(len(a ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
UpperCamelCase_ : List[Any] = 8
UpperCamelCase_ : Any = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 701 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __lowercase ( a : Dict ) -> str:
return EnvironmentCommand()
def __lowercase ( a : Optional[int] ) -> List[Any]:
return EnvironmentCommand(args.accelerate_config_file )
class _lowercase ( lowerCAmelCase ):
@staticmethod
def _UpperCamelCase ( a : ArgumentParser ):
"""simple docstring"""
__snake_case : Optional[int] =parser.add_parser('''env''' )
download_parser.set_defaults(func=a )
download_parser.add_argument(
'''--accelerate-config_file''' , default=a , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=a )
def __init__( self : Any , a : str , *a : Any ):
"""simple docstring"""
__snake_case : int =accelerate_config_file
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : List[str] ='''not installed'''
if is_safetensors_available():
import safetensors
__snake_case : int =safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
__snake_case : List[Any] =f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__snake_case : Dict ='''not installed'''
__snake_case : Optional[Any] ='''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__snake_case : Union[str, Any] =accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(a ):
__snake_case : int =load_config_from_file(self._accelerate_config_file ).to_dict()
__snake_case : Optional[int] =(
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(a , a )
else f'''\t{accelerate_config}'''
)
__snake_case : int ='''not installed'''
__snake_case : Any ='''NA'''
if is_torch_available():
import torch
__snake_case : Tuple =torch.__version__
__snake_case : Optional[Any] =torch.cuda.is_available()
__snake_case : Optional[Any] ='''not installed'''
__snake_case : Any ='''NA'''
if is_tf_available():
import tensorflow as tf
__snake_case : Optional[Any] =tf.__version__
try:
# deprecated in v2.1
__snake_case : str =tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__snake_case : List[Any] =bool(tf.config.list_physical_devices('''GPU''' ) )
__snake_case : Optional[int] ='''not installed'''
__snake_case : Dict ='''not installed'''
__snake_case : List[str] ='''not installed'''
__snake_case : Optional[int] ='''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__snake_case : Tuple =flax.__version__
__snake_case : Optional[Any] =jax.__version__
__snake_case : str =jaxlib.__version__
__snake_case : Tuple =jax.lib.xla_bridge.get_backend().platform
__snake_case : List[str] ={
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'''{safetensors_version}''',
'''Accelerate version''': f'''{accelerate_version}''',
'''Accelerate config''': f'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''',
'''Jax version''': f'''{jax_version}''',
'''JaxLib version''': f'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(a ) )
return info
@staticmethod
def _UpperCamelCase ( a : List[str] ):
"""simple docstring"""
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 497 | 0 |
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
A_ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = old_name
if "patch_embed" in old_name:
snake_case_ , snake_case_ , snake_case_ = old_name.split("." )
if layer == "0":
snake_case_ = old_name.replace("0" , "convolution1" )
elif layer == "1":
snake_case_ = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
snake_case_ = old_name.replace("3" , "convolution2" )
else:
snake_case_ = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , _A ):
snake_case_ = R"\b\d{2}\b"
if bool(re.search(_A , _A ) ):
snake_case_ = re.search(R"\d\.\d\d." , _A ).group()
else:
snake_case_ = re.search(R"\d\.\d." , _A ).group()
if int(match[0] ) < 6:
snake_case_ = old_name.replace(_A , "" )
snake_case_ = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
snake_case_ = "intermediate_stages." + trimmed_name
else:
snake_case_ = old_name.replace(_A , "" )
if int(match[2] ) < num_meta4D_last_stage:
snake_case_ = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
snake_case_ = str(int(match[2] ) - num_meta4D_last_stage )
snake_case_ = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
snake_case_ = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
snake_case_ = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
snake_case_ = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
snake_case_ = trimmed_name.replace("fc2" , "linear_out" )
snake_case_ = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , _A ):
snake_case_ = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
snake_case_ = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
snake_case_ = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
snake_case_ = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
snake_case_ = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
snake_case_ = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
snake_case_ = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
snake_case_ = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
snake_case_ = new_name.replace("norm" , "layernorm" )
snake_case_ = "efficientformer." + new_name
else:
snake_case_ = "efficientformer.encoder." + new_name
return new_name
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
for key in checkpoint.copy().keys():
snake_case_ = checkpoint.pop(_A )
snake_case_ = val
return checkpoint
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ = Image.open(requests.get(_A , stream=_A ).raw )
return image
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = torch.load(_A , map_location="cpu" )["model"]
snake_case_ = EfficientFormerConfig.from_json_file(_A )
snake_case_ = EfficientFormerForImageClassificationWithTeacher(_A )
snake_case_ = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
snake_case_ = config.depths[-1] - config.num_metaad_blocks + 1
snake_case_ = convert_torch_checkpoint(_A , _A )
model.load_state_dict(_A )
model.eval()
snake_case_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
snake_case_ = prepare_img()
snake_case_ = 256
snake_case_ = 224
snake_case_ = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
snake_case_ = processor(images=_A , return_tensors="pt" ).pixel_values
# original processing pipeline
snake_case_ = Compose(
[
Resize(_A , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(_A ),
ToTensor(),
Normalize(_A , _A ),
] )
snake_case_ = image_transforms(_A ).unsqueeze(0 )
assert torch.allclose(_A , _A )
snake_case_ = model(_A )
snake_case_ = outputs.logits
snake_case_ = (1, 1000)
if "l1" in model_name:
snake_case_ = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , _A , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case_ = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , _A , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case_ = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
# Save Checkpoints
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
processor.save_pretrained(_A )
print(f"Processor successfuly saved at {pytorch_dump_path}" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"Bearnardd/{pytorch_dump_path}" , commit_message="Add model" , use_temp_dir=_A , )
processor.push_to_hub(
repo_id=f"Bearnardd/{pytorch_dump_path}" , commit_message="Add image processor" , use_temp_dir=_A , )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowercase__ : Optional[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 376 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def lowerCamelCase__ ( a , a=False ):
__snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def lowerCamelCase__ ( a , a , a=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case = ''
else:
__snake_case = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
__snake_case = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case = in_proj_weight[
: config.hidden_size, :
]
__snake_case = in_proj_bias[: config.hidden_size]
__snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case = in_proj_weight[
-config.hidden_size :, :
]
__snake_case = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( a , a , a ):
__snake_case = dct.pop(lowerCamelCase_ )
__snake_case = val
def lowerCamelCase__ ( ):
__snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( a , a ):
__snake_case = DeiTConfig()
# all deit models have fine-tuned heads
__snake_case = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__snake_case = 1000
__snake_case = 'huggingface/label-files'
__snake_case = 'imagenet-1k-id2label.json'
__snake_case = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
__snake_case = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = int(deit_name[-6:-4] )
__snake_case = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
__snake_case = 192
__snake_case = 768
__snake_case = 12
__snake_case = 3
elif deit_name[9:].startswith('small' ):
__snake_case = 384
__snake_case = 1536
__snake_case = 12
__snake_case = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
__snake_case = 1024
__snake_case = 4096
__snake_case = 24
__snake_case = 16
# load original model from timm
__snake_case = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__snake_case = timm_model.state_dict()
__snake_case = create_rename_keys(lowerCamelCase_ , lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# load HuggingFace model
__snake_case = DeiTForImageClassificationWithTeacher(lowerCamelCase_ ).eval()
model.load_state_dict(lowerCamelCase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
__snake_case = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__snake_case = DeiTImageProcessor(size=lowerCamelCase_ , crop_size=config.image_size )
__snake_case = image_processor(images=prepare_img() , return_tensors='pt' )
__snake_case = encoding['pixel_values']
__snake_case = model(lowerCamelCase_ )
__snake_case = timm_model(lowerCamelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase_ , outputs.logits , atol=1E-3 )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowercase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 706 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 427 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
 | 30 |
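For context, a minimal sketch of the denoising loop that the full_loop helper above exercises (the model here is a hypothetical stand-in; a real pipeline would use a trained UNet):

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)  # 10 inference steps, as in the tests above

sample = torch.randn(1, 3, 8, 8)          # stand-in for an initial noise latent
model = lambda x, t: torch.zeros_like(x)  # hypothetical noise predictor

for t in scheduler.timesteps:
    residual = model(sample, t)
    # step() returns a scheduler output whose .prev_sample is the next, less noisy sample
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample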
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
 | 293 | 0 |
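A quick illustration of how the attribute_map defined above behaves (a sketch assuming this class is the XGLMConfig exported by transformers):

from transformers import XGLMConfig

config = XGLMConfig(num_layers=12, attention_heads=8)
# The generic aliases resolve to the XGLM-specific fields:
assert config.num_hidden_layers == config.num_layers == 12
assert config.num_attention_heads == config.attention_heads == 8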
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
| 708 |
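A small round-trip sketch for the reader/writer pair above, via the public API that wraps them (a sketch assuming datasets exposes Dataset.from_sql / Dataset.to_sql and accepts an in-memory SQLite connection, as the type hints above suggest):

import sqlite3
from datasets import Dataset

con = sqlite3.connect(":memory:")
ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})

ds.to_sql("my_table", con)  # SqlDatasetWriter path: batches the Arrow table through DataFrame.to_sql
round_trip = Dataset.from_sql("SELECT * FROM my_table", con)  # SqlDatasetReader path
print(round_trip[0])  # expected: {'id': 1, 'text': 'a'}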
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 271 | 0 |
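For reference, a direct call to the converter above with illustrative arguments (all paths are hypothetical; passing an empty transfo_xl_dataset_file skips the corpus-conversion branch):

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="./transfo-xl-tf/model.ckpt",       # hypothetical TF checkpoint prefix
    transfo_xl_config_file="./transfo-xl-tf/config.json",  # hypothetical config; "" falls back to TransfoXLConfig() defaults
    pytorch_dump_folder_path="./transfo-xl-pytorch",
    transfo_xl_dataset_file="",                            # empty string -> skip dataset/vocab conversion
)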
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_lowerCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score} | 137 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0) | 137 | 1 |
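As a quick sanity check of the derived channel dimension in the Swinv2 config above (using the defaults):

# 4 stages; channels double after each of the 3 downsampling steps:
# hidden_size = embed_dim * 2 ** (len(depths) - 1) = 96 * 2 ** 3 = 768
config = Swinv2Config()
assert config.hidden_size == 768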
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the logistic function 1 / (1 + e^-x) elementwise.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 456 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the largest fraction strictly below
    numerator/denominator whose denominator does not exceed `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # Largest numerator with current_numerator/current_denominator <= numerator/denominator
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # An exact multiple would reproduce numerator/denominator itself; step below it.
            current_numerator -= 1
        # Keep the candidate closest to (but below) numerator/denominator:
        # a/b > c/d  <=>  a*d > b*c (all positive), avoiding floating point.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
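For intuition, a hand-checkable call with a small search bound (an illustrative example, not from the original):

# Fractions below 3/7 with denominator <= 8: the closest is 2/5 = 0.4
# (3/8 = 0.375 and 2/7 ~ 0.286 are further from 3/7 ~ 0.4286),
# so the returned numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2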
| 456 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a low-rank adapter; the wrapped weights stay untouched."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
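A minimal usage sketch of the adapter above (hypothetical shapes; assumes torch is available as imported earlier). Because the second adapter matrix is zero-initialised, the wrapper starts out exactly equal to the frozen base layer:

base = nn.Linear(16, 16)
wrapped = LoRALayer(base, rank=4)
x = torch.randn(2, 16)
# adapter output is 0 at init, so wrapped(x) == base(x) before any training step
assert torch.allclose(wrapped(x), base(x))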
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # Serializing 4-bit models was not supported at this point, so saving should raise.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model

        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 1 |
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def create_linked_list() -> None:
    """
    >>> ll = LinkedList()
    >>> ll.insert(1)
    >>> ll.insert(2)
    >>> str(ll)
    '1 2'
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
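A short exercise of the doubly linked list API above (illustrative only):

ll = LinkedList()
for value in (1, 2, 3):
    ll.insert(value)
assert str(ll) == "1 2 3"
assert 2 in ll          # uses __contains__
ll.delete_value(2)      # unlinks the node and clears its pointers
assert str(ll) == "1 3"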
| 621 | 0 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True iff some subset of `arr` sums to `required_sum` (classic DP)."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
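A quick trace of the recurrence with an illustrative input: subset[i][j] is True iff some subset of the first i values sums to j.

# arr = [3, 4], required_sum = 7:
# subset[1][3] = True (take 3); subset[2][7] = subset[1][7] or subset[1][3] = True (add 4)
assert is_sum_subset([3, 4], 7)
assert not is_sum_subset([3, 4], 6)  # achievable sums are only 0, 3, 4, 7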
| 702 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Directed graph with transition probabilities for a Markov chain simulation."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Sample the next node by inverse-CDF sampling over outgoing probabilities.
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
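A toy two-state chain for illustration (the probabilities here are hypothetical):

transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
visited = get_transitions("a", transitions, 1000)
# The stationary distribution of this chain is (5/6, 1/6), so 'a' should be
# visited roughly five times as often as 'b'.
print(visited)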
| 315 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
snake_case = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 67 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 422 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__a = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__a = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__a = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
__a = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
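A hand check of `CER = (S + D + I) / N` on a toy pair (illustrative values, not from the docstring examples; assumes the metric script loads by name):

# reference "abc" vs prediction "axc": one substitution, N = 3 characters,
# so CER = (1 + 0 + 0) / 3, i.e. about 0.333.
cer = datasets.load_metric("cer")
print(cer.compute(predictions=["axc"], references=["abc"]))  # ~0.3333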
| 703 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradient of the squared error with respect to each weight matrix,
        # propagated backwards from the output layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
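As a quick structural check of the layer sizes wired up in the constructor (a sketch, not part of the original):

nn_check = TwoHiddenLayerNeuralNetwork(
    input_array=numpy.zeros((8, 3)), output_array=numpy.zeros((8, 1))
)
# (8,3)@(3,4) -> (8,4), then (8,4)@(4,3) -> (8,3), then (8,3)@(3,1) -> (8,1)
assert nn_check.feedforward().shape == (8, 1)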
| 627 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
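The attribute_map above lets the generic Transformers attribute names resolve to the GPT-2-style field names; a quick check using the defaults:

config = GPTBigCodeConfig()
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12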
| 59 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 59 | 1 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
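A small worked example on a 4-vertex cycle graph (illustrative adjacency matrix):

graph = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 1, 0],
]
# The square 0-1-2-3-0 is itself a Hamiltonian cycle.
assert hamilton_cycle(graph) == [0, 1, 2, 3, 0]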
| 595 | """simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 595 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowercase : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(F"""Could not make batched video from {videos}""")
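A quick illustration of the nesting normalization performed above (assumes PIL is available, as imported earlier):

from PIL import Image as PILImage

frame = PILImage.new("RGB", (8, 8))
assert make_batched(frame) == [[frame]]                    # single image -> one video of one frame
assert make_batched([frame, frame]) == [[frame, frame]]    # list of frames -> one video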
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=__lowerCamelCase , **__lowerCamelCase )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = image.astype(np.floataa )
if offset:
lowerCamelCase_ = image - (scale / 2)
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , ) -> Union[str, Any]:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCamelCase_ = to_numpy_array(__lowerCamelCase )
if do_resize:
lowerCamelCase_ = self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase )
if do_center_crop:
lowerCamelCase_ = self.center_crop(__lowerCamelCase , size=__lowerCamelCase )
if do_rescale:
lowerCamelCase_ = self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase , offset=__lowerCamelCase )
if do_normalize:
lowerCamelCase_ = self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase )
lowerCamelCase_ = to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase )
return image
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = offset if offset is not None else self.offset
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(__lowerCamelCase , param_name='''crop_size''' )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCamelCase_ = make_batched(__lowerCamelCase )
lowerCamelCase_ = [
[
self._preprocess_image(
image=__lowerCamelCase , do_resize=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , do_center_crop=__lowerCamelCase , crop_size=__lowerCamelCase , do_rescale=__lowerCamelCase , rescale_factor=__lowerCamelCase , offset=__lowerCamelCase , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , data_format=__lowerCamelCase , )
for img in video
]
for video in videos
]
lowerCamelCase_ = {'pixel_values': videos}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) | 142 |
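# Hedged usage sketch for the processor above. The class name follows the
# reconstruction in this file, and the expected shape assumes the default
# settings (shortest edge 256, center crop 224x224, channels-first output).
def _demo_preprocess():  # illustrative only
    video = [np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8) for _ in range(8)]
    processor = VivitImageProcessor()
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)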
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab_tokens = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
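# Why the asserts above expect doc "1" for the all-ones query and doc "0" for
# the negated query: with an inner-product index, the document embedded as
# 2*ones scores highest for positive queries. Tiny numpy-only sketch
# (illustrative; no faiss required):
def _inner_product_ranking_demo(retrieval_vector_size: int = 8):
    doc_embeds = np.stack([np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)])
    queries = np.stack([np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)])
    scores = queries @ doc_embeds.T  # shape (2 queries, 2 docs)
    print(scores.argmax(axis=1))  # [1 0], matching doc_ids [[1], [0]] above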
| 270 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
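# Hedged usage sketch for the tool above; "cats.png" is a hypothetical local
# image, and actually running this downloads the ViLT checkpoint named in
# `default_checkpoint`.
def _demo_image_qa():  # illustrative only
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("cats.png")
    print(tool(image, "How many cats are sleeping?"))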
| 717 |
"""Implementation of a logical XNOR gate."""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal, otherwise 0 (logical XNOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    test_xnor_gate()
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
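# Equivalent formulation, shown for comparison: for inputs restricted to
# {0, 1}, XNOR is simply the complement of XOR.
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return 1 - (input_1 ^ input_2)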
| 68 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
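# The replicate/split/shard calls above are the standard JAX data-parallel
# recipe: parameters are replicated once per device, the PRNG key is split per
# device, and batch-leading inputs get reshaped to (num_devices, per_device, ...).
# Self-contained sketch of just that pattern (no diffusers needed):
def _demo_sharding():  # illustrative only
    num_devices = jax.device_count()
    params = {"w": jnp.ones((4,))}
    batch = jnp.arange(num_devices * 2 * 3, dtype=jnp.float32).reshape(num_devices * 2, 3)

    replicated_params = replicate(params)  # adds a leading device axis to every leaf
    sharded_batch = shard(batch)  # (num_devices, 2, 3)
    rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one key per device
    return replicated_params, sharded_batch, rngs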
| 128 |
"""Compute the complement of a DNA strand."""
import re


def dna_complement(dna: str) -> str:
    """Return the complementary strand of a DNA sequence (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
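# The helper above returns the complement in the original orientation; the
# reverse complement (the more common biological convention) also reverses
# the strand.
def dna_reverse_complement(dna: str) -> str:
    return dna_complement(dna)[::-1]  # "ATGC" -> complement "TACG" -> reversed "GCAT"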
| 128 | 1 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress an LZW-encoded bit stream back into the original bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the code length grows by one bit: re-key every entry with a leading 0
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write a bit string to a file, closing the final byte with a 1-then-0s marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix that the compressor prepends to the bit stream."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Despite the (kept) name, this driver decompresses: read, strip prefix, decode, write."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
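# Sketch of the padding rule `write_file_binary` relies on: the bit stream is
# chunked into bytes and the final byte is closed with a single 1 followed by
# zeros, so a reader can locate the exact end of the payload.
def _demo_padding(to_write: str = "10110"):  # illustrative only
    byte_length = 8
    chunks = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append("10000000")
    else:
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    print(chunks)  # ['10110100']: payload 10110, then marker 1, then zero padding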
| 456 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Benchmark both triangle generators over a range of inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
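# Quick equivalence check between the two generators, for illustration:
def _demo_equivalence(num_rows: int = 7):  # illustrative only
    assert generate_pascal_triangle(num_rows) == generate_pascal_triangle_optimized(num_rows)
    print(generate_pascal_triangle(num_rows)[-1])  # [1, 6, 15, 20, 15, 6, 1]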
| 456 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
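# The second test above swaps in a different scheduler by loading it separately
# and grafting its state into the pipeline params under the "scheduler" key.
# Sketch of that idiom (downloads weights if actually executed):
def _demo_swap_scheduler():  # illustrative only
    model_id = "stabilityai/stable-diffusion-2"
    scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
    sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
        model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
    )
    params["scheduler"] = scheduler_state
    return sd_pipe, params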
| 43 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
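# Hedged usage sketch: instantiate the config and inspect the I-BERT-specific
# quantization knobs ("nonlinear" is one of the documented force_dequant values).
def _demo_ibert_config():  # illustrative only
    config = IBertConfig(quant_mode=True, force_dequant="nonlinear")
    print(config.quant_mode, config.force_dequant, config.hidden_size)  # True nonlinear 768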
| 477 | 0 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place with the deliberately inefficient slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
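# In-place usage, for illustration. Slowsort is the deliberately inefficient
# "multiply and surrender" algorithm, so keep inputs tiny.
def _demo_slowsort():  # illustrative only
    data = [5, 2, 9, 1, 5, 6]
    slowsort(data)
    print(data)  # [1, 2, 5, 5, 6, 9]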
| 375 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files stored in a Hugging Face Hub repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path: str, detail: bool = False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
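# Hedged usage sketch: list the top-level files of a dataset repo through the
# legacy filesystem. Requires network access; "squad" is just an example repo id.
def _demo_hf_filesystem():  # illustrative only
    from huggingface_hub import HfApi

    repo_info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=repo_info)
    print(fs.ls("")[:5])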
| 375 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 45 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI.

    This only selects the runs triggered by the `schedule` event on the `main` branch.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed daily CI workflow run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # kwarg name as defined in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the artifacts of the last completed daily CI workflow run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
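# Hedged usage sketch: pull the latest daily-CI artifacts into a local folder.
# "test_reports" is a hypothetical artifact name; pass the names your workflow
# actually uploads, plus a GitHub token with read access to the repo.
def _demo_fetch_reports():  # illustrative only
    token = os.environ.get("GITHUB_TOKEN")
    results = get_last_daily_ci_reports(["test_reports"], output_dir="ci_reports", token=token)
    print(sorted(results))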
| 554 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
def a__ ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str = "pytorch" ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = self.get_auto_remove_tmp_dir()
lowerCamelCase__ = os.path.join(__lowerCamelCase , "output" )
lowerCamelCase__ = os.path.join(__lowerCamelCase , "data" )
self._create_dummy_data(data_dir=__lowerCamelCase )
lowerCamelCase__ = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
lowerCamelCase__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
lowerCamelCase__ = os.path.join(__lowerCamelCase , "metrics.json" )
with open(__lowerCamelCase ) as f:
lowerCamelCase__ = json.load(__lowerCamelCase )
return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 187 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
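# Toy stand-in for the lazy-import pattern used above (not the transformers
# implementation): attributes listed in an import structure are resolved on
# first access instead of eagerly at import time.
class _ToyLazyModule:
    def __init__(self, import_structure):
        self._import_structure = import_structure

    def __getattr__(self, name):
        import importlib

        for module_name, names in self._import_structure.items():
            if name in names:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(name)

# _ToyLazyModule({"json": ["dumps"]}).dumps({"a": 1}) -> '{"a": 1}'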
| 187 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
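# Hedged usage sketch of the nested-config pattern: the vision tower's settings
# live in a sub-config that `to_dict` serializes under "vision_config".
def _demo_git_config():  # illustrative only
    config = GitConfig(vision_config={"image_size": 384})
    print(config.vision_config.image_size)  # 384
    print(config.to_dict()["vision_config"]["image_size"])  # 384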
| 308 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def lowercase__ ( lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ) -> List[Any]:
lowerCAmelCase__ : Dict = dct.pop(lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = val
def lowercase__ ( ) -> Any:
lowerCAmelCase__ : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ : Tuple = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
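# A minimal sketch of the fused-qkv split that read_in_q_k_v performs above:
# the original checkpoint stores one (3 * hidden_size, hidden_size) matrix,
# and the converter carves it into query/key/value rows. The hidden size used
# here is an illustrative assumption; the helper is not called by the script.
def _fused_qkv_split_sketch(hidden_size=768):
    qkv = torch.randn(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value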
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
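# Usage sketch: the same conversion invoked programmatically instead of via
# the CLI. The URL is this script's default checkpoint; the dump folder name
# is an illustrative assumption.
#
#     convert_dit_checkpoint(
#         checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
#         pytorch_dump_folder_path="dit-base",
#         push_to_hub=False,
#     )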
| 308 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
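# A minimal standalone sketch of the PRK/PLMS stepping pattern these tests
# exercise, with random tensors standing in for a real denoising model's
# output (the tensor shape and scheduler settings are illustrative
# assumptions). The helper is not invoked by the test suite.
def _pndm_stepping_sketch():
    scheduler = PNDMScheduler(
        num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        residual = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = torch.randn_like(sample)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    return sample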
| 720 |
"""simple docstring"""
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
_UpperCamelCase : Any = ""
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : Dict = []
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_UpperCamelCase : List[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_UpperCamelCase : Optional[int] = self.__min_dist_top_down_dp(lowerCAmelCase__ , n - 1 )
_UpperCamelCase : Any = self.__min_dist_top_down_dp(m - 1 , lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_UpperCamelCase : List[str] = 1 + min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self.dp[m][n]
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = worda
_UpperCamelCase : List[str] = worda
_UpperCamelCase : Union[str, Any] = [[-1 for _ in range(len(lowerCAmelCase__ ) )] for _ in range(len(lowerCAmelCase__ ) )]
return self.__min_dist_top_down_dp(len(lowerCAmelCase__ ) - 1 , len(lowerCAmelCase__ ) - 1 )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Any = worda
_UpperCamelCase : Optional[int] = worda
_UpperCamelCase : Tuple = len(lowerCAmelCase__ )
_UpperCamelCase : Tuple = len(lowerCAmelCase__ )
_UpperCamelCase : Any = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_UpperCamelCase : List[Any] = j
elif j == 0: # second string is empty
_UpperCamelCase : Optional[int] = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_UpperCamelCase : Tuple = self.dp[i - 1][j - 1]
else:
_UpperCamelCase : List[str] = self.dp[i][j - 1]
_UpperCamelCase : Any = self.dp[i - 1][j]
_UpperCamelCase : Optional[int] = self.dp[i - 1][j - 1]
_UpperCamelCase : Optional[Any] = 1 + min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self.dp[m][n]
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
_SCREAMING_SNAKE_CASE = input("""Enter the first string: """).strip()
_SCREAMING_SNAKE_CASE = input("""Enter the second string: """).strip()
print()
print(f'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}')
print(f'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 239 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
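# A short usage sketch of the config above; the scaling factor value is an
# illustrative assumption, not a recommended setting:
if __name__ == "__main__":
    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
    assert config.rope_scaling["factor"] == 2.0
    print(config.hidden_size, config.num_attention_heads)  # 6144 64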
| 633 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = "large" in model_name or "huge" in model_name
    use_post_layernorm = "large" in model_name or "huge" in model_name
    use_layerscale = "large" in model_name or "huge" in model_name

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
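# Usage sketch: converting the tiny variant programmatically; the dump folder
# name is an illustrative assumption:
#
#     convert_focalnet_checkpoint("focalnet-tiny", "focalnet-tiny-hf", push_to_hub=False)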
| 633 | 1 |
def apply_table(inp, table):
    # Permute/select bits of `inp` according to the 1-indexed positions in `table`.
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    # Circular left shift by one position.
    return data[1:] + data[0]


def xor(a, b):
    # Bitwise XOR of two equal-length bit strings.
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # Outer bits select the row, inner bits select the column.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    # One Feistel round of simplified DES.
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (apply the round keys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 717 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_attention_outputs = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
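# A minimal inference sketch outside the test harness; the checkpoint name is
# the one the integration test above uses, and the image URL is assumed to be
# reachable. The helper is not invoked by the test suite.
def _mobilenet_v2_pipeline_sketch():
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/mobilenet_v2_1.0_224")
    return classifier("http://images.cocodataset.org/val2017/000000039769.jpg")[0]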
| 437 | 0 |