"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
__A : Any = logging.getLogger(__name__)
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Tuple = "masked_bert"
def __init__( self : Optional[int] , A : int=3_05_22 , A : int=7_68 , A : List[Any]=12 , A : Union[str, Any]=12 , A : List[str]=30_72 , A : Dict="gelu" , A : Any=0.1 , A : int=0.1 , A : Optional[Any]=5_12 , A : Union[str, Any]=2 , A : Any=0.02 , A : str=1e-12 , A : Optional[int]=0 , A : Union[str, Any]="topK" , A : Union[str, Any]="constant" , A : Optional[int]=0.0 , **A : List[str] , ) -> int:
super().__init__(pad_token_id=A , **A )
lowercase_ : str = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Any = intermediate_size
lowercase_ : Tuple = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : Any = pruning_method
lowercase_ : Dict = mask_init
lowercase_ : Optional[Any] = mask_scale
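
# A brief usage sketch (hypothetical values): the pruning-specific fields sit
# alongside the standard BERT hyperparameters defined above.
config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
assert config.hidden_size == 768 and config.pruning_method == "topK"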
"""Google BLEU (GLEU) metric, wrapping NLTK's corpus_gleu."""
from typing import List

import datasets
from nltk.translate import gleu_score


_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
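
# A minimal from-scratch sketch of the sentence-level GLEU described in the
# docstring above: record all 1..4-gram counts of output and target, compute
# n-gram precision and recall from the clipped matches, and take the minimum.
# This is an illustrative reimplementation, not the NLTK code the metric wraps.
from collections import Counter


def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    def ngrams(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngrams(hypothesis), ngrams(reference)
    matches = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = matches / max(sum(hyp_counts.values()), 1)
    recall = matches / max(sum(ref_counts.values()), 1)
    return min(precision, recall)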
"""Argument dataclasses for the CodeParrot training, evaluation and preprocessing scripts."""
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are written to."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save the processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new tokenizer."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200000, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    vocab_size: Optional[int] = field(default=32768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for pretokenizing the dataset."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
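
# A hedged usage sketch: the dataclasses above are meant to be parsed from the
# command line with transformers' HfArgumentParser; the flag shown below is just
# one of the field names.
from transformers import HfArgumentParser

parser = HfArgumentParser(TrainingArguments)
(train_args,) = parser.parse_args_into_dataclasses(["--train_batch_size", "4"])
print(train_args.model_ckpt, train_args.train_batch_size)  # codeparrot/codeparrot 4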
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None,
        enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910,
        shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
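
# A brief usage sketch: the RoCBert-specific fields (shape and pronunciation
# embeddings) sit next to the usual BERT hyperparameters; values printed are
# the defaults above.
config = RoCBertConfig()
print(config.shape_vocab_size, config.pronunciation_vocab_size)  # 24858 910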
"""Tokenization class for PEGASUS, based on SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
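
# A small illustration of the id layout the tokenizer above maintains: ids
# 0..offset-1 are reserved for special tokens, and every SentencePiece id is
# shifted up by `offset`. The piece id below is hypothetical.
offset = 103
encoder = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}
sp_id = 17  # hypothetical SentencePiece piece id
token_id = sp_id + offset  # what _convert_token_to_id returns for a vocab piece
assert token_id not in encoder and token_id - offset == sp_id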
"""Lazy import structure for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
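
# The block above registers a _LazyModule so the heavy torch imports only run
# on first attribute access. A minimal standalone sketch of the same idea using
# module-level __getattr__ (PEP 562), independent of transformers' helper:
import importlib

_LAZY_ATTRS = {"TimeSeriesTransformerConfig": ".configuration_time_series_transformer"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")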
"""Project Euler problem 188: the last digits of the hyperexponentiation 1777↑↑1855."""


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute base**exponent % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the power tower base↑↑height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
"""Few-shot named entity recognition (FSNER) model."""
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Score each support token as a potential entity start or end for every query."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Dict = ['''pixel_values''']
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ , param_name="""crop_size""")
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__)
if "shortest_edge" in size:
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(lowerCAmelCase__ , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase__)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__SCREAMING_SNAKE_CASE = (size["""height"""], size["""width"""])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
return center_crop(lowerCAmelCase__ , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__ , param_name="""crop_size""" , default_to_square=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase__)
if not is_batched(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = [images]
if not valid_images(lowerCAmelCase__):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
__SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
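
# A hedged usage sketch: run one synthetic PIL image through the processor
# defined above and get a normalized NCHW batch back.
import numpy as np
from PIL import Image

processor = ImageProcessor()
image = Image.fromarray(np.uint8(np.random.rand(480, 640, 3) * 255))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)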
"""Projected adaptive log-softmax, as used by Transformer-XL."""
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    tail_weight, tail_bias, tail_proj = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, tail_weight, tail_bias, tail_proj)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster

                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    tail_weight, tail_bias, tail_proj = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, tail_weight, tail_bias, tail_proj)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
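
# For comparison, PyTorch ships a built-in version of the same idea in
# torch.nn.AdaptiveLogSoftmaxWithLoss: frequent classes live in a cheap head
# softmax, rare classes in projected tail clusters. Sizes below are made up.
import torch
from torch import nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=10000, cutoffs=[100, 1000])
hidden = torch.randn(8, 64)
targets = torch.randint(0, 10000, (8,))
output = adaptive(hidden, targets)
print(output.loss)  # mean negative log-likelihood over the batch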
"""Deprecated feature-extractor alias for GLPN."""
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Tests for the CPM-Ant tokenizer."""
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
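

# A minimal sketch of how a test consumes these fixtures (assumption: pytest
# injects them by argument name, and `datasets.load_dataset` accepts the repo
# id plus an auth token; everything other than the fixture names is illustrative):
#
# def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
#     from datasets import load_dataset
#     ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
#     assert "train" in ds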
| 359 |
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirically determined Harris constant, usually 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris response: R = det(M) - k * trace(M)^2
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
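
# For comparison, OpenCV ships a vectorized Harris detector; a minimal sketch
# (assumption: blockSize=3 roughly matches the 3x3 window above, and the input
# path is a placeholder):
#
# gray = cv2.imread("path_to_image", 0)
# response = cv2.cornerHarris(np.float32(gray), blockSize=3, ksize=3, k=0.04)
# corners = np.argwhere(response > 0.01 * response.max())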
| 336 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
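

# Provider availability can be checked up front; a minimal sketch (assumption:
# which providers appear depends on the installed onnxruntime build, and CUDA
# requires the onnxruntime-gpu package):
#
# import onnxruntime as ort
# print(ort.get_available_providers())  # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']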
| 104 |
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
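
# On Python 3.10+ the same count is available natively as int.bit_count(); a
# quick cross-check (assumption: non-negative int input, as validated above):
#
# assert (25).bit_count() == bin(25).count("1") == 3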
| 181 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
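
# What the lazy pattern buys: importing the package stays cheap, and the heavy
# torch-backed module is only loaded on first attribute access (assumption:
# this timing behaviour is how _LazyModule resolves attributes):
#
# from transformers.models import unispeech   # fast, no torch-side import yet
# ctc_cls = unispeech.UniSpeechForCTC         # triggers the real import here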
| 365 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
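

# A minimal standalone sketch of the same forward pass outside the test harness
# (assumption: boxes use the 0-1000 normalized (x0, y0, x1, y1) convention that
# LiLT expects; values here are illustrative):
#
# model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
# input_ids = torch.tensor([[1, 2]])
# bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
# with torch.no_grad():
#     hidden = model(input_ids=input_ids, bbox=bbox).last_hidden_state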
| 282 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
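

# A minimal usage sketch for the patcher above (assumption: `some_module` and
# its internal use of os.path.join are illustrative; only the names defined in
# this file are real):
#
# import some_module
#
# with patch_submodule(some_module, "os.path.join", lambda *p: "/".join(p)):
#     some_module.build_path("a", "b")   # runs against the patched join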
| 141 |
from __future__ import annotations
def average(nums: list[float]) -> float:
    """
    Return the arithmetic mean of nums.

    >>> average([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
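
# The standard error of a Monte Carlo estimate shrinks roughly as 1/sqrt(n), so
# a 100x increase in samples buys about one extra stable digit (assumption: the
# iteration counts below are illustrative only):
#
# for n in (1_000, 100_000):
#     pi_estimator(n)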
| 141 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
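
# Example invocation (assumption: the script file name follows the usual
# transformers conversion-script convention, and the local paths are illustrative):
#
# python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#     --pytorch_model_path efficientformer_l1_300d.pth \
#     --config_file efficientformer_l1_config.json \
#     --pytorch_dump_path efficientformer-l1-300 \
#     --no-push_to_hub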
| 141 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 288 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
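
# Quick round-trip sanity check after saving (assumption: run in the same
# session, so mname_tiny still points at the freshly written directory):
#
# reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
# reloaded_tok = FSMTTokenizer.from_pretrained(mname_tiny)
# print(reloaded.num_parameters())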
| 288 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
a :Any = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_lowerCamelCase )
a :Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
a :Optional[int] = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
a :Dict = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=20 )
a :int = tokenizer.batch_decode(_lowerCamelCase )[0]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
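# A hypothetical invocation sketch (the test-file path follows the usual
# transformers layout and is an assumption, not stated in this file):
#   python -m pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -k rope_scaling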
| 370 |
import math


def fx(x: float, a: float) -> float:
    """Evaluate f(x) = x^2 - a, whose positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Derivative of f(x) = x^2 - a, i.e. f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring from 2.0."""
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with Newton's method applied to f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
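    # A minimal usage sketch (not part of the original module): Newton's method
    # converges quadratically near the root, so few iterations are needed.
    print(square_root_iterative(2.0))    # ~1.4142135623730951
    print(square_root_iterative(625.0))  # ~25.0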
| 281 | 0 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo

        # Layer Normalization
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
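# A minimal invocation sketch (all paths are placeholders and the script filename
# is an assumption; "google/long-t5-local-base" is a real hub config id):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/flax_dump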
| 333 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover greedily via a maximal matching."""
    chosen_vertices = set()
    # collect the graph's edges as a set of (from_node, to_node) pairs
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of edges (from_node, to_node) of an adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
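# A minimal usage sketch (LevitConfig/LevitModel are the public transformers names;
# the chosen sizes below are illustrative):
#
#     from transformers import LevitConfig, LevitModel
#
#     config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
#     model = LevitModel(config)  # randomly initialized weights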
| 354 |
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole base-10 number (possibly negative) to a base-16 string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
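    # Quick sanity checks (values verified by hand):
    print(decimal_to_hexadecimal(255))   # 0xff
    print(decimal_to_hexadecimal(-256))  # -0x100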
| 109 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
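# A minimal usage sketch (the data path and task name are illustrative only):
#
#     from transformers import AutoTokenizer
#
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")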
| 205 |
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
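# A minimal usage sketch: pick whichever backend is installed and hand its name to
# Trainer.hyperparameter_search (the Trainer construction itself is elided here):
#
#     backend_name = default_hp_search_backend()  # e.g. "optuna" if optuna is installed
#     best_run = trainer.hyperparameter_search(backend=backend_name, n_trials=10)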
| 237 | 0 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
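# A minimal usage sketch of the API exercised above (the dataclass below is
# illustrative, not one of the test fixtures):
#
#     @dataclass
#     class RunArgs:
#         model_name: str = field(metadata={"help": "Model identifier"})
#         lr: float = 3e-4
#
#     parser = HfArgumentParser(RunArgs)
#     (run_args,) = parser.parse_args_into_dataclasses(["--model_name", "bert-base-uncased"])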
| 4 | 1 |
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial division against small primes, then fall back to rabin_miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime number of roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
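    # Note: rabin_miller is probabilistic; with 5 rounds a composite can pass with
    # probability at most (1/4) ** 5, which is why the cheap trial-division filter
    # above runs first.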
| 267 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
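# A minimal usage sketch of the class under test ("openai/clip-vit-base-patch32" is
# a real hub checkpoint; `image` is assumed to be a PIL.Image):
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")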
| 267 | 1 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 = identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
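    # For "hello" vs "world" only the single 'l' matches inside the search window,
    # giving jaro = (1/5 + 1/5 + 1/1) / 3 with no common prefix, so the call below
    # should print roughly 0.4666666666666666.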
print(jaro_winkler("hello", "world")) | 31 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float64 )
UpperCamelCase__ = np_speech_inputs.tolist()
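        # pad() should downcast the float64 inputs to float32 for both numpy and torch return tensors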
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31 | 1 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
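    # Download the class-info JSON from the Hub and build the metadata mapping:
    # id -> class name, plus the lists of "thing" ids and class names.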
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="""pil""" )
        common(is_instance_map=True , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
        __lowerCAmelCase[0, 20:] = 1
        __lowerCAmelCase[1, :15] = 1
        __lowerCAmelCase[5, :10] = 1
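        # binary_mask_to_rle returns a flat list alternating 1-indexed run starts and run lengths over
        # the row-major flattened mask; the set pixels in rows 0 and 1 above are contiguous when
        # flattened, so the first foreground run starts at pixel 21 and spans 45 pixels.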
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
        __lowerCAmelCase = image_processor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __lowerCAmelCase = image_processor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
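        # Model for Vision-to-Seq mapping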
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
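        # Model for Next Sentence Prediction mapping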
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
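        # Model for Speech Seq2Seq mapping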
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
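        # Model for Audio Classification mapping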
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 301 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ :Optional[int] = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :List[str] = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
A_ :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 356 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 245 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_a = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def __A ( __lowerCAmelCase = "mumbai" )-> Generator[tuple[str, str], None, None]:
"""simple docstring"""
_UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
_UpperCAmelCase = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
_UpperCAmelCase = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 39 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ : Any = {'UserAgent': UserAgent().random}
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> dict:
"""simple docstring"""
a_ : Tuple = script.contents[0]
a_ : int = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
a_ : Tuple = F"""https://www.instagram.com/{username}/"""
a_ : Optional[Any] = self.get_json()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> dict:
a_ : Any = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text
a_ : Dict = BeautifulSoup(SCREAMING_SNAKE_CASE__ , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ) -> str:
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self : Optional[int] ) -> str:
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return self.user_data["username"]
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
return self.user_data["full_name"]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["biography"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["business_email"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["external_url"]
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.user_data["is_private"]
def SCREAMING_SNAKE_CASE_ ( __A : str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
a_ : int = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Union[str, Any] = InstagramUser('github')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 32 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : List[str] ) -> List[Any]:
__snake_case = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__snake_case = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
__snake_case = f"""{src_lang}-{tgt_lang}"""
__snake_case = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__snake_case = os.path.join(snake_case_ , '''README.md''' )
print(f"""Generating {path}""" )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
snake_case_ = Path(__file__).resolve().parent.parent.parent
snake_case_ = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
snake_case_ , snake_case_ , snake_case_ = model_name.split('-')
snake_case_ = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 238 |
def lowerCamelCase__ ( snake_case_ : int = 1000 ) -> int:
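    """Return the sum of the decimal digits of 2**power (Project Euler problem 16)."""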
__snake_case = 2**power
__snake_case = str(snake_case_ )
__snake_case = list(snake_case_ )
__snake_case = 0
for i in list_num:
sum_of_num += int(snake_case_ )
return sum_of_num
if __name__ == "__main__":
snake_case_ = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
snake_case_ = solution(power)
print('Sum of the digits is: ', result)
| 238 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
while second != 0:
        # carry holds the common set bits of first and second
        c = first & second
        # XOR adds the bits where exactly one operand has a set bit
        first ^= second
        # shift the carry left by one so it is added on the next pass
        second = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_:int = int(input("""Enter the first number: """).strip())
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 116 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_:str = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 116 | 1 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase :
'''simple docstring'''
pass
| 370 |
'''simple docstring'''
def __UpperCamelCase ( ):
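    # Project Euler problem 19: count the Sundays that fell on the first of the
    # month during the twentieth century (1 Jan 1901 to 31 Dec 2000).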
lowercase__ : Any = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
lowercase__ : Any = 6
lowercase__ : Optional[Any] = 1
lowercase__ : int = 1901
lowercase__ : List[str] = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
lowercase__ : List[Any] = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
lowercase__ : Any = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
lowercase__ : List[Any] = day - days_per_month[month - 2]
if month > 12:
year += 1
lowercase__ : Dict = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 214 | 0 |
'''simple docstring'''
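# A one-line quine: the string is %r-formatted with itself, so the program prints its own source.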
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : list[int] , lowerCamelCase : int ):
lowerCAmelCase = [0] * no_of_processes
lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowerCamelCase ):
lowerCAmelCase = burst_time[i]
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 0
    # While processes remain uncompleted, every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process; the shortest process in ready_process (target_process)
    # is executed next.
while completed != no_of_processes:
lowerCAmelCase = []
lowerCAmelCase = -1
for i in range(lowerCamelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
if len(lowerCamelCase ) > 0:
lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
lowerCAmelCase = 0
lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : list[int] ):
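    # Turnaround time of each process = burst time + waiting time.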
lowerCAmelCase = [0] * no_of_processes
for i in range(lowerCamelCase ):
lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case =4
__snake_case =[2, 5, 3, 7]
__snake_case =[0, 0, 0, 0]
__snake_case =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['keras_nlp']
def __init__( self : str , *A : Dict , **A : Dict ):
requires_backends(self , ["keras_nlp"] )
| 202 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : Union[str, Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : List[Any] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"}
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A ) )
_UpperCAmelCase : List[str] = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def snake_case_ ( self : List[Any] , **A : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : int , **A : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : List[str] , **A : Optional[Any] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : str ):
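        # Build a single random channels-first uint8 image and convert it to a channels-last PIL image.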
        _UpperCAmelCase : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
_UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Any = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Any = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Any = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = image_processor(A , return_tensors="np" )
_UpperCAmelCase : Any = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : str ):
_UpperCAmelCase : Tuple = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Optional[int] = "lower newer"
_UpperCAmelCase : Union[str, Any] = processor(text=A )
_UpperCAmelCase : Optional[int] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : str = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : int ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : List[str] = processor.batch_decode(A )
_UpperCAmelCase : int = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : int = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[Any] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 202 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
SCREAMING_SNAKE_CASE__ : str =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __magic_name__ ( self : Dict ) -> Any:
pass
def __magic_name__ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any =image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple =image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __magic_name__ ( self : Dict ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Dict =image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __magic_name__ ( self : Dict ) -> Any:
# with apply_OCR = True
SCREAMING_SNAKE_CASE__ : List[Any] =LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : Any =load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : int =Image.open(ds[0]['''file'''] ).convert('''RGB''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : Tuple =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__ : Tuple =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
# with apply_OCR = False
SCREAMING_SNAKE_CASE__ : Tuple =LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) | 152 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_SCREAMING_SNAKE_CASE : List[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
super().__init__()
lowerCamelCase_ = torchvision.models.resnetaaa(pretrained=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = list(model.children() )[:-2]
lowerCamelCase_ = nn.Sequential(*__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Any:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowerCamelCase_ = self.pool(self.model(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = torch.flatten(__SCREAMING_SNAKE_CASE , start_dim=2 )
lowerCamelCase_ = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class a ( __snake_case ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
lowerCamelCase_ = [json.loads(__SCREAMING_SNAKE_CASE ) for l in open(__SCREAMING_SNAKE_CASE )]
lowerCamelCase_ = os.path.dirname(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer
lowerCamelCase_ = labels
lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = transforms
def __len__( self : Any ) -> Any:
return len(self.data )
def __getitem__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
lowerCamelCase_ = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sentence[0], sentence[1:-1], sentence[-1]
lowerCamelCase_ = sentence[: self.max_seq_length]
lowerCamelCase_ = torch.zeros(self.n_classes )
lowerCamelCase_ = 1
lowerCamelCase_ = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
lowerCamelCase_ = self.transforms(__SCREAMING_SNAKE_CASE )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCamelCase ( self : Dict ) -> Dict:
lowerCamelCase_ = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] ) -> str:
lowerCamelCase_ = [len(row['sentence'] ) for row in batch]
lowerCamelCase_ , lowerCamelCase_ = len(_lowerCamelCase ), max(_lowerCamelCase )
lowerCamelCase_ = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
lowerCamelCase_ = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowerCamelCase , _lowerCamelCase ) ):
lowerCamelCase_ = input_row['sentence']
lowerCamelCase_ = 1
lowerCamelCase_ = torch.stack([row['image'] for row in batch] )
lowerCamelCase_ = torch.stack([row['label'] for row in batch] )
lowerCamelCase_ = torch.stack([row['image_start_token'] for row in batch] )
lowerCamelCase_ = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase__ ( ) -> List[str]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase__ ( ) -> Union[str, Any]:
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
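# Minimal usage sketch (illustrative only; `tokenizer` and the .jsonl path are
# assumptions, not part of the original file):
#
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)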
| 183 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
for i in range(0, __snake_case ):
for _ in range(0, n - i - 1 ): # printing spaces
print(''' ''', end='''''' )
for _ in range(0, i + 1 ): # printing stars
print('''* ''', end='''''' )
print()
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
for i in range(__snake_case, 0, -1 ):
for _ in range(__snake_case, 0, -1 ): # printing stars
print('''* ''', end='''''' )
print()
for _ in range(n - i + 1, 0, -1 ): # printing spaces
print(''' ''', end='''''' )
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(__snake_case ) # upper half
reverse_floyd(__snake_case ) # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
_a = 1
while K:
_a = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
_a = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 363 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 100 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''Return the smallest k such that the repunit R(k) = 111...1 (k ones) is
    divisible by divisor, or 0 when no such repunit exists (i.e. when divisor
    shares a factor with 10).'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    '''Return the least odd divisor, coprime to 10, whose repunit divisibility
    index A(n) first exceeds the given limit.'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
| 1 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(""" """, end="""""")
        for _ in range(0, i + 1):  # printing stars
            print("""* """, end="""""")
        print()


def reverse_floyd(n) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("""* """, end="""""")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(""" """, end="""""")


def pretty_print(n) -> None:
    if n <= 0:
        print(""" ... .... nothing printing :(""")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 35 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """data2vec-text"""

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
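# Illustrative only (not in the original file): the ONNX config above exports
# dynamic batch/sequence axes, e.g.
#   onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="default")
#   list(onnx_config.inputs)  ->  ["input_ids", "attention_mask"]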
| 368 |
'''simple docstring'''
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}


class BertAbsConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''bertabs'''

    def __init__(self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
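# Illustrative only: BertAbsConfig() reproduces the fine-tuned checkpoint
# defaults above; individual encoder/decoder fields can be overridden, e.g.
#   config = BertAbsConfig(dec_layers=8, dec_dropout=0.1)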
| 229 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
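# Worked example (illustrative, not in the original file): for token_ids_0 = [5, 6]
# and no second segment, create_token_type_ids_from_sequences returns
# [2, 0, 0, 0], i.e. the cls_token_type_id (2) followed by zeros for the two
# tokens and the trailing sep.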
| 113 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
'''simple docstring'''
    def __init__(self, components=None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase__ ) -> Vector:
SCREAMING_SNAKE_CASE = len(self )
if size == len(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self , lowerCAmelCase__ ) -> Vector:
SCREAMING_SNAKE_CASE = len(self )
if size == len(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> float:
...
def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
if isinstance(lowerCAmelCase__ , (float, int) ):
SCREAMING_SNAKE_CASE = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = len(self )
SCREAMING_SNAKE_CASE = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception('invalid operand!' )
def __A ( self ) -> Vector:
return Vector(self.__components )
def __A ( self , lowerCAmelCase__ ) -> float:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
SCREAMING_SNAKE_CASE = value
def __A ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
SCREAMING_SNAKE_CASE = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
SCREAMING_SNAKE_CASE = self * other
SCREAMING_SNAKE_CASE = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension; referenced by Matrix.__mul__ below
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector: 1 at index `pos`, 0 elsewhere
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Vector:
random.seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = [random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )]
return Vector(SCREAMING_SNAKE_CASE_ )
class Matrix:
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
SCREAMING_SNAKE_CASE = matrix
SCREAMING_SNAKE_CASE = w
SCREAMING_SNAKE_CASE = h
def __str__( self ) -> str:
SCREAMING_SNAKE_CASE = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
def __mul__( self , lowerCAmelCase__ ) -> Vector | Matrix:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # matrix-vector
if len(lowerCAmelCase__ ) == self.__width:
SCREAMING_SNAKE_CASE = zero_vector(self.__height )
for i in range(self.__height ):
SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] * other.component(lowerCAmelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase__ , sum(lowerCAmelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(lowerCAmelCase__ , (int, float) ): # matrix-scalar
SCREAMING_SNAKE_CASE = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
return None
def __A ( self ) -> int:
return self.__height
def __A ( self ) -> int:
return self.__width
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
SCREAMING_SNAKE_CASE = value
else:
raise Exception('change_component: indices out of bounds' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
SCREAMING_SNAKE_CASE = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise Exception('Indices out of bounds' )
def __A ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
SCREAMING_SNAKE_CASE = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase__ ) for y in range(self.__width )
]
return sum(lowerCAmelCase__ )
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Matrix:
random.seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = [
[random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )
]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 113 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
lowercase = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 35 | import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}


class AlignTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''align_text_model'''

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''text_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''align_vision_model'''

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2_560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''align'''
    is_composition = True  # assumption: the original sets a True-valued class flag here

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
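# Illustrative usage only (not part of the original file):
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
#   assert config.to_dict()["model_type"] == "align"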
| 35 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    # normalize a size to a (height, width) pair: scalars are duplicated,
    # iterables are returned unchanged
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
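# Illustrative check (not in the original file): to_atuple(224) == (224, 224),
# while to_atuple((224, 224)) is returned as-is; the tests below use it to
# normalize image and patch sizes before computing sequence lengths.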
@require_tf
class TFVisionTextDualEncoderMixin:
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ):
pass
def lowerCamelCase ( self : Optional[int] ):
pass
def lowerCamelCase ( self : Optional[Any] ):
pass
def lowerCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[Any]=None , **snake_case_ : List[Any] ):
snake_case__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel(snake_case_ )
snake_case__ : Tuple = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any]=None , **snake_case_ : Union[str, Any] ):
snake_case__ , snake_case__ : List[str] = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : str=None , **snake_case_ : Union[str, Any] ):
snake_case__ , snake_case__ : Dict = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
snake_case__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase ( self : Any , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : int=None , **snake_case_ : str ):
snake_case__ , snake_case__ : Union[str, Any] = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Any = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
snake_case__ : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
snake_case__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
snake_case__ : Tuple = after_output[0].numpy()
snake_case__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[str]=None , **snake_case_ : List[str] ):
snake_case__ , snake_case__ : Optional[int] = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : int = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
snake_case__ : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = to_atuple(vision_model.config.image_size )
snake_case__ : str = to_atuple(vision_model.config.patch_size )
snake_case__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ : Union[str, Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ : Any = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self : str , snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : float ):
snake_case__ : List[Any] = np.abs((a - b) ).max()
self.assertLessEqual(snake_case_ , snake_case_ , f"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCamelCase ( self : Any ):
snake_case__ : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case_ )
def lowerCamelCase ( self : str ):
snake_case__ : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**snake_case_ )
def lowerCamelCase ( self : int ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case_ )
@slow
def lowerCamelCase ( self : str ):
snake_case__ , snake_case__ : Any = self.get_pretrained_model_and_inputs()
snake_case__ : Union[str, Any] = model_a(**snake_case_ )
snake_case__ : Union[str, Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case_ )
snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
snake_case__ : int = model_a(**snake_case_ )
snake_case__ : Dict = after_outputs[0].numpy()
snake_case__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
snake_case__ : Optional[int] = 13
snake_case__ : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : Any = random_attention_mask([batch_size, 4] )
snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] ):
snake_case__ : Union[str, Any] = TFViTModel(snake_case_ , name="""vision_model""" )
snake_case__ : Any = TFBertModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
def lowerCamelCase ( self : str ):
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        text_config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Dict ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
snake_case__ : Any = 13
snake_case__ : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : Optional[Any] = random_attention_mask([batch_size, 4] )
snake_case__ : Dict = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self : List[str] , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Optional[int]=None , **snake_case_ : Optional[Any] ):
snake_case__ , snake_case__ : Any = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Tuple = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : Union[str, Any] = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
snake_case__ : str = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case__ : Tuple = to_atuple(vision_model.config.image_size )
snake_case__ : List[Any] = to_atuple(vision_model.config.patch_size )
snake_case__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ : Dict = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
snake_case__ : Union[str, Any] = TFDeiTModel(snake_case_ , name="""vision_model""" )
snake_case__ : Tuple = TFRobertaModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
def lowerCamelCase ( self : List[str] ):
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        text_config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
snake_case__ : Tuple = 13
snake_case__ : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : str = random_attention_mask([batch_size, 4] )
snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : int ):
snake_case__ : List[str] = TFCLIPVisionModel(snake_case_ , name="""vision_model""" )
snake_case__ : Optional[Any] = TFBertModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
def lowerCamelCase ( self : Dict ):
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=snake_case_ )
snake_case__ : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
snake_case__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case__ : List[str] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=snake_case_ , padding=snake_case_ , return_tensors="""np""" )
snake_case__ : int = model(**snake_case_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
snake_case__ : Optional[int] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1E-3 ) )
| 35 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    # sum of error terms over the first `end` training examples, weighted by
    # the feature at `index`; index == -1 selects the bias term
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 0 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    # Brian Kernighan's trick: number & (number - 1) clears the lowest set bit,
    # so the loop body runs exactly once per set bit.
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    '''simple docstring'''
    def do_benchmark(number: int) -> None:
        setup = '''import __main__ as z'''
        print(f'''Benchmark when {number = }:''')
        print(f'''{get_set_bits_count_using_modulo_operator(number) = }''')
        timing = timeit(f'''z.get_set_bits_count_using_modulo_operator({number})''', setup=setup)
        print(f'''timeit() runs in {timing} seconds''')
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(number) = }''')
        timing = timeit(
            f'''z.get_set_bits_count_using_brian_kernighans_algorithm({number})''', setup=setup, )
        print(f'''timeit() runs in {timing} seconds''')
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 219 |
'''simple docstring'''
import socket
def main() -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    # NOTE: this port is arbitrary, but it must match the one the sending
    # server listens on.
    port = 12312
    sock.connect((host, port) )
    sock.send(b'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
    main()
| 219 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : Any = TransfoXLTokenizer
__a : Optional[Any] = False
__a : str = False
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__lowercase = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
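        # write a minimal vocab, one token per line, as TransfoXLTokenizer expects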
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
        lowerCAmelCase__['''lower_case'''] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = '''<unk> UNwanted , running'''
__lowercase = '''<unk> unwanted, running'''
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
        __lowercase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
__lowercase = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(lowerCAmelCase__ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [0, 4, 8, 7] )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
        __lowercase = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
        __lowercase = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
        __lowercase = TransfoXLTokenizer(lower_case=False )
__lowercase = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
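        # Moses-style tokenization with wikitext escapes: @-@ marks an
        # intra-word hyphen, @,@ and @.@ mark separators inside numbers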
__lowercase = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 210 |
import requests
def send_slack_message( message_body , slack_url ):
    """simple docstring"""
    # Slack incoming webhooks expect a JSON payload with a "text" field.
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            F"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""") | 210 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
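# maps each submodule to its public names; consumed lazily by _LazyModule below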
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
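    # gather this process's copy of the tensor from all processes and verify
    # that every gathered copy matches the first one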
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
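    # forward each line read from the stream to `callback` until EOF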
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 339 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase__ = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict: dict) -> None:
    '''simple docstring'''
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k , None )
UpperCAmelCase__ = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict: dict) -> dict:
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb) -> nn.Linear:
    '''simple docstring'''
    # weight tying: build an output projection that shares its weights with
    # the decoder token embedding
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str) -> bytes:
    '''simple docstring'''
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    # OpenAI embeds the expected SHA-256 checksum as the second-to-last path
    # segment of each download URL
    expected_sha256 = url.split('/' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , 'rb' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , 'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , 'rb' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> None:
'''simple docstring'''
if ".pt" not in checkpoint_path:
_UpperCAmelCase = _download(_MODELS[checkpoint_path] )
else:
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
_UpperCAmelCase = original_checkpoint['dims']
_UpperCAmelCase = original_checkpoint['model_state_dict']
_UpperCAmelCase = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(_UpperCAmelCase )
rename_keys(_UpperCAmelCase )
_UpperCAmelCase = True
_UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0]
_UpperCAmelCase = WhisperConfig(
        vocab_size=dimensions['n_vocab'] ,
        encoder_ffn_dim=_UpperCAmelCase ,
        decoder_ffn_dim=_UpperCAmelCase ,
        num_mel_bins=dimensions['n_mels'] ,
        d_model=dimensions['n_audio_state'] ,
        max_target_positions=dimensions['n_text_ctx'] ,
        encoder_layers=dimensions['n_audio_layer'] ,
        encoder_attention_heads=dimensions['n_audio_head'] ,
        decoder_layers=dimensions['n_text_layer'] ,
        decoder_attention_heads=dimensions['n_text_head'] ,
        max_source_positions=dimensions['n_audio_ctx'] , )
_UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F" but all the following weights are missing {missing}" )
if tie_embeds:
_UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCAmelCase = proj_out_weights
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 339 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def SCREAMING_SNAKE_CASE ( self , __A=0 ) -> Dict:
a =floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) )
a =torch.manual_seed(__A )
a ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__A )
a =self.get_dummy_inputs()
a =pipe(**__A ).images
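        # fingerprint the image via the bottom-right 3x3 corner of the last channel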
a =image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
a =np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
a =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A )
pipe.set_progress_bar_config(disable=__A )
a =self.get_dummy_inputs()
a =pipe(**__A ).images
a =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a =np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
a =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
a =self.get_dummy_inputs()
a =pipe(**__A ).images
a =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a =np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
a =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
a =self.get_dummy_inputs()
a =pipe(**__A ).images
a =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a =np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
a =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
a =self.get_dummy_inputs()
a =pipe(**__A ).images
a =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a =np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =ort.SessionOptions()
a =False
return options
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
a =init_image.resize((128, 128) )
# using the PNDM scheduler by default
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
a ='''A fantasy landscape, trending on artstation'''
a =torch.manual_seed(0 )
a =pipe(
prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type='''np''' , )
a =output.images
a =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
a =np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
a =init_image.resize((128, 128) )
a =LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
a =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
a ='''A fantasy landscape, trending on artstation'''
a =torch.manual_seed(0 )
a =pipe(
prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type='''np''' , )
a =output.images
a =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
a =np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 351 |
"""simple docstring"""
from __future__ import annotations
def _A ( electron_conc , hole_conc , intrinsic_conc , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 215 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( a: int ) -> int:
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(a , float ):
        raise TypeError("""Input value must be a 'int' type""" )
    return bin(a ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def SCREAMING_SNAKE_CASE_ ( year: int ,month: int ,day: int ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm: find the century's anchor day, derive this year's
    # doomsday weekday from it, then offset from the month's doomsday date.
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    day_anchor = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    dooms_day = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowercase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
lowercase_ = "sshleifer/student_marian_en_ro_6_1"
lowercase_ = "sshleifer/tiny-mbart"
@require_torch
class A ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any],lowercase_ : List[str]=False,lowercase_ : Any=None,lowercase_ : Union[str, Any]=True,lowercase_ : List[str]=True,lowercase_ : Optional[Any]=True,lowercase_ : Dict=True,)-> Optional[int]:
'''simple docstring'''
A__ = self.run_trainer(
eval_steps=1,max_len=1_2,model_name=lowercase_,num_train_epochs=1,distributed=lowercase_,extra_args_str=lowercase_,predict_with_generate=lowercase_,do_train=lowercase_,do_eval=lowercase_,do_predict=lowercase_,)
A__ = TrainerState.load_from_json(os.path.join(lowercase_,'trainer_state.json' ) ).log_history
if not do_eval:
return
A__ = [log for log in logs if 'eval_loss' in log.keys()]
A__ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A__ = eval_metrics[-1]
        assert isinstance(last_step_stats['eval_bleu'],float )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def snake_case__ ( self : int )-> Any:
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase_ )
@require_torch_multi_gpu
def snake_case__ ( self : Union[str, Any] )-> List[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def snake_case__ ( self : List[Any] )-> Optional[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase_,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def snake_case__ ( self : Tuple )-> int:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase_,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase_,extra_args_str='--sharded_ddp zero_dp_2',predict_with_generate=lowercase_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def snake_case__ ( self : List[Any] )-> Optional[Any]:
'''simple docstring'''
self.run_seqaseq_quick(
distributed=lowercase_,extra_args_str='--sharded_ddp zero_dp_2 --fp16',predict_with_generate=lowercase_ )
@require_apex
@require_torch_gpu
def snake_case__ ( self : Union[str, Any] )-> List[str]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase_,extra_args_str='--fp16 --fp16_backend=apex' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowercase_,extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def snake_case__ ( self : List[Any],lowercase_ : List[Any] )-> List[str]:
'''simple docstring'''
A__ = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
A__ = experiments[experiment_id]
A__ = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
A__ = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowercase_,extra_args_str=data['extra_args_str'] )
A__ = len(re.findall(lowercase_,cl.err ) )
self.assertEqual(lowercase_,data['n_matches'] )
@slow
def snake_case__ ( self : Tuple )-> Union[str, Any]:
'''simple docstring'''
A__ = self.run_trainer(
eval_steps=2,max_len=1_2_8,model_name=lowercase_,learning_rate=3E-4,num_train_epochs=1_0,distributed=lowercase_,)
# Check metrics
A__ = TrainerState.load_from_json(os.path.join(lowercase_,'trainer_state.json' ) ).log_history
A__ = [log for log in logs if 'eval_loss' in log.keys()]
A__ = eval_metrics[0]
A__ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'],float )
# test if do_predict saves generations and metrics
A__ = os.listdir(lowercase_ )
A__ = {os.path.basename(lowercase_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def snake_case__ ( self : List[str] )-> Optional[int]:
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowercase_ : str ) -> Tuple[int, float]:
A__ = '--skip_memory_metrics 0'
A__ = self.run_trainer(
max_len=1_2_8,model_name=lowercase_,learning_rate=3E-4,num_train_epochs=1,optim=lowercase_,distributed=lowercase_,extra_args_str=lowercase_,do_eval=lowercase_,do_predict=lowercase_,n_gpus_to_use=1,)
# Check metrics
A__ = TrainerState.load_from_json(Path(lowercase_,'trainer_state.json' ) ).log_history
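            # the trainer logs memory deltas in bytes; convert them to MB (2**20)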
A__ = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**2_0 )
A__ = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**2_0 )
A__ = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A__ , A__ , A__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A__ , A__ , A__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
A__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A__ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A__ = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowercase_,lowercase_,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB',)
self.assertGreater(
lowercase_,lowercase_,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB',)
self.assertEqual(
lowercase_,lowercase_,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def snake_case__ ( self : Any,lowercase_ : int,lowercase_ : str,lowercase_ : int,lowercase_ : float = 3E-3,lowercase_ : str = "adafactor",lowercase_ : bool = False,lowercase_ : str = None,lowercase_ : int = 0,lowercase_ : bool = True,lowercase_ : bool = True,lowercase_ : bool = True,lowercase_ : bool = True,lowercase_ : int = None,)-> Optional[int]:
'''simple docstring'''
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        A__ = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(lowercase_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(lowercase_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        A__ = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(lowercase_ )}\n '.split()
A__ = '\n --do_predict\n '.split()
A__ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A__ = get_gpu_count()
A__ = get_torch_dist_unique_port()
A__ = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
A__ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_,env=self.get_env() )
else:
A__ = ['run_translation.py'] + args
with patch.object(lowercase_,'argv',lowercase_ ):
main()
return output_dir
| 356 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 282 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''dpt'''
def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]:
super().__init__(**A_ )
A = hidden_size
A = is_hybrid
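        # DPT-hybrid routes the early stages through a BiT (ResNet) backbone
        # instead of plain ViT patch embeddings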
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
A = BitConfig(**A_ )
            elif isinstance(A_ ,dict ):
logger.info('Initializing the config with a `BiT` backbone.' )
A = BitConfig(**A_ )
            elif isinstance(A_ ,PretrainedConfig ):
A = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A = backbone_featmap_shape
A = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
A = None
A = None
A = []
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
A = readout_type
A = reassemble_factors
A = neck_hidden_sizes
A = fusion_hidden_size
A = head_in_index
A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A = use_auxiliary_head
A = auxiliary_loss_weight
A = semantic_loss_ignore_index
A = semantic_classifier_dropout
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
        return output
| 74 |
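# Usage sketch for the config class above (it mirrors `transformers.DPTConfig`);
# the values below are illustrative, not taken from the file:
# from transformers import DPTConfig
# cfg = DPTConfig(is_hybrid=False, readout_type="project")
# d = cfg.to_dict()                      # nested backbone config is serialised too
# assert d["model_type"] == "dpt"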
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowercase (SCREAMING_SNAKE_CASE_ : BertModel , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
SCREAMING_SNAKE_CASE = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
SCREAMING_SNAKE_CASE = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = model.state_dict()
def to_tf_var_name(SCREAMING_SNAKE_CASE_ : str ):
for patt, repl in iter(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return F'bert/{name}'
def create_tf_var(SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : tf.Session ):
SCREAMING_SNAKE_CASE = tf.dtypes.as_dtype(tensor.dtype )
SCREAMING_SNAKE_CASE = tf.get_variable(dtype=SCREAMING_SNAKE_CASE_ , shape=tensor.shape , name=SCREAMING_SNAKE_CASE_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(SCREAMING_SNAKE_CASE_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
SCREAMING_SNAKE_CASE = to_tf_var_name(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
SCREAMING_SNAKE_CASE = torch_tensor.T
SCREAMING_SNAKE_CASE = create_tf_var(tensor=SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ , session=SCREAMING_SNAKE_CASE_ )
tf.keras.backend.set_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = session.run(SCREAMING_SNAKE_CASE_ )
print(F'Successfully created {tf_name}: {np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}' )
SCREAMING_SNAKE_CASE = tf.train.Saver(tf.trainable_variables() )
saver.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def lowercase (SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ) -> Any:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Directory in which to save tensorflow model' )
SCREAMING_SNAKE_CASE = parser.parse_args(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=SCREAMING_SNAKE_CASE_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
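# Standalone illustration of the renaming scheme used in `to_tf_var_name` above:
# a subset of the (pattern, replacement) pairs applied in order, mapping a
# PyTorch parameter name to its TensorFlow checkpoint counterpart.
def _pt_to_tf_name_demo(name: str) -> str:
    for patt, repl in (
        ("layer.", "layer_"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    ):
        name = name.replace(patt, repl)
    return f"bert/{name}"

# e.g. "encoder.layer.0.attention.self.query.weight"
#   -> "bert/encoder/layer_0/attention/self/query/kernel"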
| 113 | 0 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"] ):
            continue
        item.add_marker(pytest.mark.unit )


def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )


@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(test_extracted_datasets_path ) )


@pytest.fixture(autouse=True , scope="session" )
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , False )


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , True )
| 351 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCamelCase_ : int = TypeVar('''T''')
UpperCamelCase_ : Dict = TypeVar('''U''')
class _a ( Generic[T, U] ):
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
_snake_case = key
_snake_case = val
_snake_case = None
_snake_case = None
def __repr__( self ) -> str:
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _a ( Generic[T, U] ):
def __init__( self ) -> None:
_snake_case = DoubleLinkedListNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = DoubleLinkedListNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case = self.rear, self.head
def __repr__( self ) -> str:
_snake_case = ["DoubleLinkedList"]
_snake_case = self.head
while node.next is not None:
rep.append(str(_SCREAMING_SNAKE_CASE ) )
_snake_case = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> None:
_snake_case = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_snake_case = node
_snake_case = previous
_snake_case = node
_snake_case = self.rear
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
_snake_case = node.next
_snake_case = node.prev
_snake_case = None
_snake_case = None
return node
class _a ( Generic[T, U] ):
SCREAMING_SNAKE_CASE_ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> str:
_snake_case = DoubleLinkedList()
_snake_case = capacity
_snake_case = 0
_snake_case = 0
_snake_case = 0
_snake_case = {}
def __repr__( self ) -> str:
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self ,_SCREAMING_SNAKE_CASE ) -> bool:
return key in self.cache
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_snake_case = self.cache[key]
_snake_case = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_SCREAMING_SNAKE_CASE )
return node.val
self.miss += 1
return None
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(_SCREAMING_SNAKE_CASE ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case = DoubleLinkedListNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case = value
self.list.add(_SCREAMING_SNAKE_CASE )
@classmethod
def _lowercase ( cls ,_SCREAMING_SNAKE_CASE = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(_SCREAMING_SNAKE_CASE ) -> Callable[..., U]:
def cache_decorator_wrapper(*_SCREAMING_SNAKE_CASE ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case = LRUCache(_SCREAMING_SNAKE_CASE )
_snake_case = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case = func(*_SCREAMING_SNAKE_CASE )
cls.decorator_function_to_instance_map[func].put(args[0] ,_SCREAMING_SNAKE_CASE )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_SCREAMING_SNAKE_CASE ,"cache_info" ,_SCREAMING_SNAKE_CASE ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
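# Intended usage sketch. The original class/method names behind the obfuscated
# `_a` / `_lowercase` stubs are LRUCache with get/put and a `decorator`
# classmethod, so this is written against those de-obfuscated names:
# cache = LRUCache(2)
# cache.put(1, 1); cache.put(2, 2); cache.put(3, 3)   # capacity 2: evicts key 1
# assert 1 not in cache and cache.get(2) == 2
#
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     return num if num < 2 else fib(num - 1) + fib(num - 2)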
| 142 | 0 |
def solution(power: int = 1000) -> int:
    """simple docstring"""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 231 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __a , unittest.TestCase ):
_lowercase =None
_lowercase =BloomTokenizerFast
_lowercase =BloomTokenizerFast
_lowercase =True
_lowercase =False
_lowercase ='''tokenizer_file'''
_lowercase ={'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __a ( self ) -> Dict:
super().setUp()
lowerCAmelCase_ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , **_UpperCamelCase ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowerCAmelCase_ = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
lowerCAmelCase_ = tokenizer.batch_encode_plus(_UpperCamelCase )["input_ids"]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self , _UpperCamelCase=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCAmelCase_ = "This is a simple input"
lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ = ("This is a simple input", "This is a pair")
lowerCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = load_dataset("xnli" , "all_languages" , split="test" , streaming=_UpperCamelCase )
lowerCAmelCase_ = next(iter(_UpperCamelCase ) )["premise"] # pick up one data
lowerCAmelCase_ = list(sample_data.values() )
lowerCAmelCase_ = list(map(tokenizer.encode , _UpperCamelCase ) )
lowerCAmelCase_ = [tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) for x in output_tokens]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> List[Any]:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 231 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = ["""image_processor""", """tokenizer"""]
UpperCAmelCase__ = """ViTImageProcessor"""
UpperCAmelCase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Optional[Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None , **UpperCAmelCase : Tuple ) -> int:
lowerCamelCase__ : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase , )
lowerCamelCase__ : str = kwargs.pop('feature_extractor' )
lowerCamelCase__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[Any] , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : int ) -> Optional[Any]:
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
lowerCamelCase__ : str = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if visual_prompt is not None:
lowerCamelCase__ : Dict = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if images is not None:
lowerCamelCase__ : Dict = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if visual_prompt is not None and images is not None:
lowerCamelCase__ : Optional[Any] = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCamelCase__ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCamelCase__ : Any = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def A_ ( self : Optional[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> List[Any]:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : List[str] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[int] ) -> Tuple:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A_ ( self : Optional[Any] ) -> List[str]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase , )
return self.image_processor_class
@property
def A_ ( self : Dict ) -> List[str]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase , )
return self.image_processor
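# Hedged usage sketch: this processor matches `transformers.CLIPSegProcessor`
# (a ViT image processor paired with a CLIP tokenizer). Model id and the
# `image` variable below are illustrative only:
# from transformers import CLIPSegProcessor
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")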
| 362 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[str] = logging.get_logger()
@dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tensor , UpperCAmelCase : Tensor ) -> Any:
        lowerCamelCase__ : List[str] = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase , nn.Conv2d ) or isinstance(UpperCAmelCase , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Tensor ) -> Dict:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def A_ ( self : List[str] ) -> int:
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
def __call__( self : Any , UpperCAmelCase : Tensor ) -> int:
lowerCamelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase ).parametrized
lowerCamelCase__ : List[Any] = Tracker(self.src )(UpperCAmelCase ).parametrized
lowerCamelCase__ : Any = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.src_skip , UpperCAmelCase ) )
lowerCamelCase__ : int = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.dest_skip , UpperCAmelCase ) )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(UpperCAmelCase )} operations while"""
F""" destination module has {len(UpperCAmelCase )}.""" )
for dest_m, src_m in zip(UpperCAmelCase , UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ) -> Any:
print(F"""Converting {name}...""" )
with torch.no_grad():
lowerCamelCase__ : int = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval()
lowerCamelCase__ : Union[str, Any] = ResNetForImageClassification(_UpperCAmelCase ).eval()
lowerCamelCase__ : str = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase )
lowerCamelCase__ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(_UpperCAmelCase )
assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one."
lowerCamelCase__ : Union[str, Any] = F"""resnet{"-".join(name.split("resnet" ) )}"""
print(_UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
# we can use the convnext one
lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
print(F"""Pushed {checkpoint_name}""" )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ) -> List[str]:
lowerCamelCase__ : Dict = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Optional[int] = 1000
lowerCamelCase__ : int = (1, num_labels)
lowerCamelCase__ : Any = 'huggingface/label-files'
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : Any = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Any = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_UpperCAmelCase : str = parser.parse_args()
_UpperCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
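# Self-contained sketch of the forward-hook tracing idea behind `Tracker`
# above, assuming only torch (kept commented so the script does not execute
# extra work on import; shapes and module names are illustrative):
#
# import torch
# import torch.nn as nn
#
# model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 6 * 6, 10))
# traced = []
#
# def hook(module, inputs, output):
#     if len(list(module.children())) == 0:  # record leaf modules only
#         traced.append(module.__class__.__name__)
#
# handles = [m.register_forward_hook(hook) for m in model.modules()]
# model(torch.randn(1, 3, 8, 8))
# for h in handles:
#     h.remove()
# print(traced)  # ['Conv2d', 'ReLU', 'Flatten', 'Linear']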
| 45 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Tuple = '''trocr'''
_UpperCAmelCase : int = ['''past_key_values''']
_UpperCAmelCase : Any = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str=5_0_2_6_5 ,SCREAMING_SNAKE_CASE__ : int=1_0_2_4 ,SCREAMING_SNAKE_CASE__ : List[str]=1_2 ,SCREAMING_SNAKE_CASE__ : str=1_6 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_0_9_6 ,SCREAMING_SNAKE_CASE__ : str="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : str=0.0 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Any=0.02 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=1 ,SCREAMING_SNAKE_CASE__ : str=0 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,**SCREAMING_SNAKE_CASE__ : int ,):
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : Dict = d_model
__lowerCamelCase : Union[str, Any] = decoder_layers
__lowerCamelCase : Optional[int] = decoder_attention_heads
__lowerCamelCase : str = decoder_ffn_dim
__lowerCamelCase : Optional[Any] = activation_function
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : Dict = dropout
__lowerCamelCase : Any = attention_dropout
__lowerCamelCase : List[str] = activation_dropout
__lowerCamelCase : Optional[Any] = init_std
__lowerCamelCase : Tuple = decoder_layerdrop
__lowerCamelCase : Dict = use_cache
__lowerCamelCase : Dict = scale_embedding
__lowerCamelCase : List[str] = use_learned_position_embeddings
__lowerCamelCase : int = layernorm_embedding
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
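# Usage sketch (the class above mirrors `transformers.TrOCRConfig`); values
# are illustrative:
# from transformers import TrOCRConfig
# cfg = TrOCRConfig(d_model=256, decoder_layers=4)
# assert cfg.hidden_size == 256   # attribute_map routes hidden_size -> d_model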
| 73 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) -> float:
    """simple docstring"""
    if not _UpperCamelCase:
        raise ValueError('''List is empty''' )
    return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
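# Quick sanity check of the mean helper above (illustrative values):
assert __lowercase([3, 6, 9, 12]) == 7.5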
| 337 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict )-> int:
'''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
def snake_case__ ( self : Tuple,**lowercase_ : str )-> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint,**__lowercase )
def snake_case__ ( self : Optional[int] )-> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = BarkProcessor(tokenizer=__lowercase )
processor.save_pretrained(self.tmpdirname )
A__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab(),tokenizer.get_vocab() )
@slow
def snake_case__ ( self : Optional[int] )-> Dict:
'''simple docstring'''
A__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,)
processor.save_pretrained(
self.tmpdirname,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,speaker_embeddings_directory=self.speaker_embeddings_directory,)
A__ = self.get_tokenizer(bos_token='(BOS)',eos_token='(EOS)' )
A__ = BarkProcessor.from_pretrained(
self.tmpdirname,self.speaker_embeddings_dict_path,bos_token='(BOS)',eos_token='(EOS)',)
self.assertEqual(processor.tokenizer.get_vocab(),tokenizer_add_kwargs.get_vocab() )
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,)
A__ = 3_5
A__ = 2
A__ = 8
A__ = {
'semantic_prompt': np.ones(__lowercase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A__ = processor(text=self.input_string,voice_preset=__lowercase )
A__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(),processed_voice_preset.get(__lowercase,np.array([] ) ).tolist() )
# test loading voice preset from npz file
A__ = os.path.join(self.tmpdirname,'file.npz' )
np.savez(__lowercase,**__lowercase )
A__ = processor(text=self.input_string,voice_preset=__lowercase )
A__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(),processed_voice_preset.get(__lowercase,np.array([] ) ).tolist() )
# test loading voice preset from the hub
A__ = processor(text=self.input_string,voice_preset=self.voice_preset )
def snake_case__ ( self : List[str] )-> int:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = BarkProcessor(tokenizer=__lowercase )
A__ = processor(text=self.input_string )
A__ = tokenizer(
self.input_string,padding='max_length',max_length=2_5_6,add_special_tokens=__lowercase,return_attention_mask=__lowercase,return_token_type_ids=__lowercase,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key],encoded_processor[key].squeeze().tolist() )
| 359 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase_ = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
lowercase_ = "zero2"
lowercase_ = "zero3"
lowercase_ = [ZEROa, ZEROa]
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
'''simple docstring'''
A__ = parameterized.to_safe_name('_'.join(str(SCREAMING_SNAKE_CASE__ ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
lowercase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class A ( _UpperCAmelCase ):
"""simple docstring"""
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : Any )-> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
@require_torch_multi_gpu
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : Union[str, Any],lowercase_ : Optional[Any],lowercase_ : List[Any] )-> int:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : List[Any] )-> Any:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
@require_torch_multi_gpu
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : Dict,lowercase_ : Optional[Any],lowercase_ : List[Any] )-> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
def snake_case__ ( self : Tuple,lowercase_ : Any )-> Union[str, Any]:
'''simple docstring'''
pass
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : str,lowercase_ : int = 1_0,lowercase_ : bool = True,lowercase_ : bool = True,lowercase_ : bool = True,)-> Union[str, Any]:
'''simple docstring'''
A__ = models[model]
A__ = self.run_trainer(
stage=lowercase_,model_name=lowercase_,eval_steps=lowercase_,num_train_epochs=1,distributed=lowercase_,fpaa=lowercase_,)
self.do_checks(lowercase_ )
return output_dir
def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : str,lowercase_ : int = 1_0,lowercase_ : int = 1,lowercase_ : bool = True,lowercase_ : bool = True,)-> Any:
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir('./xxx',after=lowercase_ )
A__ = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowercase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A__ = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
A__ = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
A__ = self.get_launcher(lowercase_ )
A__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_,env=self.get_env() )
return output_dir
def snake_case__ ( self : Any,lowercase_ : int=False )-> Tuple:
'''simple docstring'''
A__ = min(2,get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
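# For reference: with distributed=True on a 2-GPU machine the launcher above
# expands the final command to roughly (paths illustrative):
#   deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path ... --deepspeed <tests>/ds_config_wav2vec2_zero2.json --fp16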
| 282 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
# test for the above condition
self.test()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = False
while not completed:
if counter == 1:
self.reset()
lowerCamelCase_ = self.advance()
if not self.does_advance(UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.update(UpperCamelCase )
counter += 1
if counter > 1_0000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def snake_case ( self ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case ( self ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case ( self ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def snake_case ( self , UpperCamelCase=False ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCamelCase ):
"""simple docstring"""
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowerCamelCase_ = token_ids
lowerCamelCase_ = len(self.token_ids )
lowerCamelCase_ = -1 # the index of the currently fulfilled step
lowerCamelCase_ = False
def snake_case ( self ):
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
if self.does_advance(UpperCamelCase ):
self.fulfilled_idx += 1
lowerCamelCase_ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowerCamelCase_ = True
lowerCamelCase_ = completed
else:
# failed to make progress.
lowerCamelCase_ = True
self.reset()
return stepped, completed, reset
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = False
lowerCamelCase_ = 0
def snake_case ( self ):
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def snake_case ( self , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = PhrasalConstraint(self.token_ids )
if stateful:
lowerCamelCase_ = self.seqlen
lowerCamelCase_ = self.fulfilled_idx
lowerCamelCase_ = self.completed
return new_constraint
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=True ):
"""simple docstring"""
lowerCamelCase_ = max([len(UpperCamelCase ) for one in nested_token_ids] )
lowerCamelCase_ = {}
for token_ids in nested_token_ids:
lowerCamelCase_ = root
for tidx, token_id in enumerate(UpperCamelCase ):
if token_id not in level:
lowerCamelCase_ = {}
lowerCamelCase_ = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
f''' {nested_token_ids}.''' )
lowerCamelCase_ = root
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.trie
for current_token in current_seq:
lowerCamelCase_ = start[current_token]
lowerCamelCase_ = list(start.keys() )
return next_tokens
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.next_tokens(UpperCamelCase )
return len(UpperCamelCase ) == 0
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = list(root.values() )
if len(UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.count_leaves(UpperCamelCase )
return len(UpperCamelCase ) != leaf_count
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCamelCase ):
"""simple docstring"""
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
lowerCamelCase_ = DisjunctiveTrie(UpperCamelCase )
lowerCamelCase_ = nested_token_ids
lowerCamelCase_ = self.trie.max_height
lowerCamelCase_ = []
lowerCamelCase_ = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
lowerCamelCase_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
if self.does_advance(UpperCamelCase ):
self.current_seq.append(UpperCamelCase )
lowerCamelCase_ = True
else:
lowerCamelCase_ = True
self.reset()
lowerCamelCase_ = self.trie.reached_leaf(self.current_seq )
lowerCamelCase_ = completed
return stepped, completed, reset
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = False
lowerCamelCase_ = []
def snake_case ( self ):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def snake_case ( self , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCamelCase_ = self.seqlen
lowerCamelCase_ = self.current_seq
lowerCamelCase_ = self.completed
return new_constraint
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = constraints
# max # of steps required to fulfill a given constraint
lowerCamelCase_ = max([c.seqlen for c in constraints] )
lowerCamelCase_ = len(UpperCamelCase )
lowerCamelCase_ = False
self.init_state()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = []
lowerCamelCase_ = None
lowerCamelCase_ = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints]
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCamelCase_ = constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
else:
lowerCamelCase_ = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCamelCase_ ,lowerCamelCase_ = self.add(UpperCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowerCamelCase_ ,lowerCamelCase_ = False, False
if self.completed:
lowerCamelCase_ = True
lowerCamelCase_ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = self.inprogress_constraint.update(UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) )
lowerCamelCase_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCamelCase_ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCamelCase_ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase ):
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = pending_constraint.update(UpperCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(UpperCamelCase )
lowerCamelCase_ = None
if not complete and stepped:
lowerCamelCase_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCamelCase_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCamelCase_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def snake_case ( self , UpperCamelCase=True ):
"""simple docstring"""
        lowerCamelCase_ = ConstraintListState(self.constraints )  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
if stateful:
lowerCamelCase_ = [
constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCamelCase_ = self.inprogress_constraint.copy(stateful=UpperCamelCase )
lowerCamelCase_ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
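# Usage sketch: classes like these back constrained beam search in
# transformers.generate (real names: Constraint, PhrasalConstraint,
# DisjunctiveConstraint, ConstraintListState). Model/tokenizer ids below are
# illustrative, and the call requires beam search (num_beams > 1):
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
# tok = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# constraint = PhrasalConstraint(tok("ist schnell", add_special_tokens=False).input_ids)
# inputs = tok("translate English to German: the car is fast", return_tensors="pt")
# out = model.generate(**inputs, constraints=[constraint], num_beams=4)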
| 55 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int ) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
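# Worked check: the four non-trivial digit-cancelling fractions are 16/64,
# 19/95, 26/65 and 49/98; their product reduces to 1/100, so solution() == 100.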
| 55 | 1 |
"""simple docstring"""
def binomial_coefficient(n : int , r : int ):
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
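# Prints 252 == C(10, 5), built row by row via Pascal's rule
# C(n, k) = C(n - 1, k - 1) + C(n - 1, k).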
| 369 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits : int = 3 ):
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("""number of qubits must be an integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""" )
    qr = QuantumRegister(number_of_qubits , """qr""" )
    cr = ClassicalRegister(number_of_qubits , """cr""" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit , backend , shots=1_00_00 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
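# On the all-zeros initial state every controlled-phase gate acts trivially, so
# the 3-qubit QFT above produces a uniform superposition: each of the 8
# bitstrings should appear roughly 1250 times out of 10000 shots.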
| 132 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_lowercase : Tuple =models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_lowercase : List[str] =tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_lowercase : Optional[int] =tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
_lowercase : List[Any] =train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
_lowercase : List[Any] =test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
_lowercase : Tuple =tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
_lowercase : str =tf.keras.preprocessing.image.img_to_array(test_image)
_lowercase : Dict =np.expand_dims(test_image, axis=0)
_lowercase : Union[str, Any] =classifier.predict(test_image)
# training_set.class_indices
# `predict` returns a sigmoid probability in (0, 1), so compare against a 0.5
# threshold rather than exact integer values
if result[0][0] <= 0.5:
    _lowercase : str = "Normal"
else:
    _lowercase : Tuple = "Abnormality detected"
| 170 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCAmelCase : List[str] = 1_6
_lowerCAmelCase : List[Any] = 3_2
def lowerCAmelCase ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
UpperCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCAmelCase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ = 8
else:
UpperCAmelCase__ = None
return tokenizer.pad(
_lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
UpperCAmelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : int = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
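    # Note on the accumulation arithmetic above: with the default batch_size=16 and
    # MAX_GPU_BATCH_SIZE=16 no accumulation happens (gradient_accumulation_steps
    # stays 1); a batch_size of 64 would give 64 // 16 = 4 accumulation steps at a
    # per-device batch size of 16.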
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 169 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Calls an accelerate offload hook's ``pre_forward`` before ``method`` runs, when one is attached."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
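# Usage sketch (assumption: `MyAutoencoder` is a hypothetical accelerate-managed
# module, not part of this file):
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...  # an attached offload hook moves weights in place before this runs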
| 363 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector over the reals, backed by a list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
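# Quick sanity sketch of the Vector API:
#
#   >>> Vector([1, 2, 3]) * Vector([1, 1, 1])   # dot product
#   6
#   >>> str(Vector([1, 2]) + Vector([3, 4]))
#   '(4,6)'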
class Matrix:
    """A simple w x h matrix with vector and scalar multiplication."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
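# Determinant sanity sketch via the cofactor expansion above:
#
#   >>> Matrix([[1, 2], [3, 4]], 2, 2).determinant()
#   -2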
| 245 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UnconditionalImagePipeline(DiffusionPipeline):
    """A minimal unconditional image-generation pipeline built from a `unet` and a `scheduler`."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 74 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key: str, default: bool = False) -> bool:
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
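# Usage sketch for the simulation modes above:
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#       ...  # `requests` calls made here fail fast instead of hanging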
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(self, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker, or 0 when not distributed."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker, usable as `torch.distributed`'s master port."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
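# Usage sketch (assumption: called from a test that shells out to a worker script):
#
#   result = execute_subprocess_async(["python", "-c", "print('ok')"])
#   assert result.returncode == 0 and result.stdout[0] == "ok"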
| 53 | 0 |
'''simple docstring'''
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    >>> apparent_power(100, 5, 90, 0)
    (3.061616997868383e-14+500j)
    """
    # Convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of `a` modulo `m` via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
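# Sanity sketch: 3 * 4 = 12 ≡ 1 (mod 11), so
#
#   >>> mod_inverse(3, 11)
#   4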
| 280 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
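# Worked example of the sequence-length arithmetic above, using this tester's
# defaults: image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches, and
# adding the [CLS] and distillation tokens yields seq_length = 227.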
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 85 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle by exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Benchmark both triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
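    # Quick sanity sketch of both generators:
    #   >>> generate_pascal_triangle(3)
    #   [[1], [1, 1], [1, 2, 1]]
    #   >>> generate_pascal_triangle_optimized(3)
    #   [[1], [1, 1], [1, 2, 1]]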
| 328 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
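# Net effect: submodules named in `_import_structure` are imported lazily on first
# attribute access, so importing this package does not pull in torch or TF up front.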
| 355 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
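# e.g. is_sq(16) -> True (4 * 4), while is_sq(10) -> False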
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the sum x + y + z as a reduced (numerator, denominator) pair."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """
    Collect every unique sum s = x + y + z over proper fractions x, y (with
    denominators bounded by `order`) and z satisfying x^n + y^n = z^n for
    n in {1, 2, -1, -2}, then return numerator + denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 124 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase : Any = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
lowerCamelCase : int = parser.parse_args()
if args.check_lib:
lowerCamelCase : Optional[int] = importlib.import_module('transformers')
lowerCamelCase : List[str] = Path(transformers_module.__file__).parent
else:
lowerCamelCase : Optional[int] = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 124 | 1 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column] safely."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, recording and printing every complete placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
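# For the classic n = 8 board this enumerates all 92 solutions.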
| 360 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 3_8015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 2_5506, '''token_str''': ''' accuser'''},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-05,
'''token''': 3_8015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-05,
'''token''': 2_5506,
'''token_str''': ''' accuser''',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_3606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 3_5676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 1_6416, '''token_str''': '''ELS'''},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-05,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 1_6416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase : int =unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_3606, '''token_str''': ''' Clara'''},
] , )
UpperCAmelCase : List[str] =unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
[
{
'''score''': 2.2e-05,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-05, '''token''': 1_6416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-05,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-05, '''token''': 1_6416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
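# Usage sketch (not part of the test suite; it simply exercises the same public
# API the tests above rely on):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")                        # two best fills
#   unmasker("My name is <mask>", targets=[" Patrick"])  # restrict candidates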
| 78 | 0 |
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
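# Worked example (taken directly from the parametrized cases above): ten shards
# split over at most three jobs come back as contiguous, near-equal ranges.
#
#   >>> _distribute_shards(num_shards=10, max_num_jobs=3)
#   [range(0, 4), range(4, 7), range(7, 10)]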
| 37 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
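# Round-trip example (illustrative; assumes the COCO fixture image referenced
# above is available locally): feeding create_inputs' result to output_types
# recovers the requested type list.
#
#   >>> output_types(create_inputs(["text", "audio"]))
#   ['text', 'audio']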
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checking(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 132 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
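    # Note on the two methods above (explanatory comment, not in the original):
    # SentencePiece processors are not picklable, so __getstate__ drops the
    # processor and __setstate__ rebuilds it from the saved `vocab_file` path.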
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string with SentencePiece, splitting digit-comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
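# Usage sketch (illustrative only; requires network access and the `jieba`
# package that the constructor imports):
#
#   tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tok.encode("你好 世界")
#   text = tok.decode(ids)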
| 71 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
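    # Note (explanatory, not part of the original tests): `rope_scaling` is a
    # plain dict on LlamaConfig, so the same setup outside the test harness is
    # simply:
    #
    #   config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})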
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is curently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 71 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), input_bpe_tokens)
| 69 |
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (rank selection)."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
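

if __name__ == "__main__":
    # Demo (illustrative, not in the original): the element of rank
    # len(items) // 2 in sorted order is the median, here 5 for [7, 1, 5, 3, 9].
    print(quick_select([7, 1, 5, 3, 9], 2))  # -> 5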
| 193 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11_008,
    "13B": 13_824,
    "30B": 17_920,
    "65B": 22_016,
    "70B": 28_672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
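

# Worked example (explanatory, not part of the original script): for LLaMA-7B,
# dim = 4096 with the default multiplier and rounding gives the 11008 listed in
# INTERMEDIATE_SIZE_MAP above: int(8 * 4096 / 3) = 10922, rounded up to the
# next multiple of 256 = 11008.
assert compute_intermediate_size(4096) == 11_008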
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
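

# Example invocation (paths are placeholders; the script file name is an
# assumption based on the transformers conversion-script convention):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/llama/weights --model_size 7B --output_dir /output/path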
if __name__ == "__main__":
    main()
| 369 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2 | 270 | 0 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
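# Example usage (sketch): hide the cursor while redrawing an interactive menu.
# `render_menu()` is a hypothetical stand-in for the caller's drawing code.
#
#   with hide():
#       render_menu()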
| 77 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 280 | 0 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting the torch device env var works."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should not have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with a bitsandbytes 8-bit model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing a model dispatched between CPU and GPU with 8-bit weights should fail."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing an 8-bit model split across GPUs should fail under MULTI_GPU distribution."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Preparing an 8-bit model split across GPUs works outside a distributed setup."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 159 |
import os
from datetime import datetime as dt
from github import Github
_lowerCamelCase : List[Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    """Close or un-stale issues on huggingface/diffusers based on recent activity."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
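# Note (sketch): this script is intended to run on a schedule (e.g., a daily
# GitHub Actions cron job) with a GITHUB_TOKEN that can read and edit issues.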
| 159 | 1 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 348 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
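    # Note: `_LazyModule` defers the heavy framework imports declared above until an
    # attribute (e.g. `OPTModel`) is first accessed, keeping `import transformers` fast.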
| 348 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 361 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
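# Example usage (sketch): this builder backs the packaged "pandas" loader in
# `datasets`, which reads pickled DataFrames. The file path below is hypothetical.
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="train_dataframe.pkl", split="train")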
| 15 | 0 |
import datasets
from .evaluate import evaluate
lowercase_ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowercase_ = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowercase_ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 58 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
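# Example (sketch): instantiate a default DeiT configuration and inspect a field.
#
#   config = DeiTConfig()
#   print(config.hidden_size)  # 768 by default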
| 350 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec] , dim=0 )
        # take the first three timestep indices and repeat each across the batch -> shape (3, per_sample_batch)
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
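        # Note: the batched step advances three stacked trajectories in a single call; the fixed
        # reference values above act as regression guards for the parallel code path.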
    def test_full_loop_no_noise(self ):
        """simple docstring"""
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
    def test_full_loop_with_v_prediction(self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self ):
        """simple docstring"""
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self ):
        """simple docstring"""
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 166 | 0 |
def A ( num ) -> bool:
    if num < 0:
        return False
    num_copy : int = num
    rev_num : int = 0
    while num > 0:
        # append the last digit of num to the running reversal
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
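    # Quick illustrative check (not part of the original module):
    print(A(12321))  # expected: True
    print(A(12345))  # expected: False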
| 71 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 71 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class lowercase ( unittest.TestCase ):
    def get_tokenizer(self ,mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self ,mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
])
@slow
    def test_bleu_scores(self ,pair ,min_bleu_score):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['''src''']
        tgt_sentences = bleu_data[pair]['''tgt''']
        batch = tokenizer(src_sentences ,return_tensors='''pt''' ,truncation=True ,padding='''longest''').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids ,num_beams=8 ,)
        decoded_sentences = tokenizer.batch_decode(
            outputs ,skip_special_tokens=True ,clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences ,tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['''bleu'''] ,min_bleu_score)
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
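    # _LazyModule defers the heavy torch-dependent imports until an attribute of this module is first accessed.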
| 97 | 0 |
def harmonic_series( n_term : str ):
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(F'1/{temp + 1}' if series else '1' )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
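    # Illustrative example: harmonic_series('4') returns ['1', '1/2', '1/3', '1/4'].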
| 62 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    '''simple docstring'''
    bwt_string: str
    idx_original_string: int
def all_rotations( s : str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s : str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string : str , idx_original_string : int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or castable"""
            """ to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
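# Worked example (illustrative): bwt_transform("banana") -> {"bwt_string": "nnbaaa", "idx_original_string": 3},
# and reverse_bwt("nnbaaa", 3) recovers "banana".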
if __name__ == "__main__":
a__ = """Provide a string that I will generate its BWT transform: """
a__ = input(entry_msg).strip()
a__ = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
| 317 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) ,codebase_urls=["""https://www.atticusprojectai.org/cuad"""] ,reference_urls=["""https://www.atticusprojectai.org/cuad"""] ,)
    def _compute( self ,predictions ,references ) -> str:
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        # re-shape the references into the nested SQuAD-style layout (paragraphs -> qas -> answers)
        # that the official CUAD evaluate() script expects
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset ,predictions=pred_dict )
return score
| 371 |
'''simple docstring'''
from PIL import Image
def mean_threshold( image ):
    """simple docstring"""
    height , width = image.size
    mean : int = 0
    pixels = image.load()
    # first pass: accumulate the global mean intensity
    for i in range(width ):
        for j in range(height ):
            pixel : int = pixels[j, i]
            mean += pixel
    mean //= width * height
    # second pass: binarize against the mean
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
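# Note: the script assumes a grayscale ('L') input; every pixel above the global mean becomes 255, the rest 0.
# 'path_to_image' and 'output_image_path' are placeholders to be replaced with real paths.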
| 184 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ) -> Any:
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
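# Each (src, dest) pair maps a timm parameter name onto the corresponding HF ViT-hybrid parameter name.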
def read_in_q_k_v( state_dict , config , base_model=False ) -> Union[str, Any]:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
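# timm stores query/key/value as one fused qkv projection; the slices above split it back into three hidden_size-sized blocks.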
def remove_classification_head_( state_dict ) -> str:
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ) -> Dict:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ) -> str:
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ) -> List[str]:
    '''simple docstring'''
    backbone_config = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_8_4 , num_labels=1_0_0_0 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('Predicted class:' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model and processor to the hub {vit_name}' )
        model.push_to_hub(F'ybelkada/{vit_name}' )
        processor.push_to_hub(F'ybelkada/{vit_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 295 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
    def __init__(self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
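# ModelForTest is a tiny 3 -> 4 -> 5 MLP with a BatchNorm1d in between, used as the fixture for the hook tests below.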
class PreForwardHook( ModelHook ):
    def pre_forward(self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    def post_forward(self , module , output ):
        return output + 1
class A ( unittest.TestCase ):
    def test_add_and_remove_hooks(self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model , test_hook )
        self.assertEqual(test_model._hf_hook , test_hook )
        self.assertTrue(hasattr(test_model , '_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , 'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
        remove_hook_from_module(test_model )
        self.assertFalse(hasattr(test_model , '_hf_hook' ) )
        self.assertFalse(hasattr(test_model , '_old_forward' ) )
    def test_append_and_remove_hooks(self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model , test_hook )
        add_hook_to_module(test_model , test_hook , append=True )
        self.assertEqual(isinstance(test_model._hf_hook , SequentialHook ) , True )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(test_model , '_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , 'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
        remove_hook_from_module(test_model )
        self.assertFalse(hasattr(test_model , '_hf_hook' ) )
        self.assertFalse(hasattr(test_model , '_old_forward' ) )
    def test_pre_forward_hook_is_executed(self ):
        test_model = ModelForTest()
        x = torch.randn(2 , 3 )
        expected = test_model(x + 1 )
        expected2 = test_model(x + 2 )
        test_hook = PreForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , expected , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , expected , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(test_model , test_hook )
        output2 = test_model(x )
        assert torch.allclose(output2 , expected2 , atol=1E-5 )
    def test_post_forward_hook_is_executed(self ):
        test_model = ModelForTest()
        x = torch.randn(2 , 3 )
        output = test_model(x )
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , output + 1 , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(test_model , test_hook )
        output2 = test_model(x )
        assert torch.allclose(output2 , output + 2 , atol=1E-5 )
    def test_no_grad_in_inference(self ):
        test_model = ModelForTest()
        x = torch.randn(2 , 3 )
        output = test_model(x )
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , output + 1 ) )
        self.assertTrue(output1.requires_grad )
        test_hook.no_grad = True
        output1 = test_model(x )
        self.assertFalse(output1.requires_grad )
@require_multi_gpu
    def test_align_devices_as_model_parallel(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1 , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.linear1.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.linear2.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model , AlignDevicesHook(io_same_device=True ) )
        x = torch.randn(2 , 3 ).to(0 )
        output = model(x )
        self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        hook_kwargs = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs['execution_device'] )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # Now test with buffers included in the offload
        hook_kwargs = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(model , execution_device=execution_device , offload=True )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(model , execution_device=execution_device , offload=True , offload_buffers=True )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() , offload_buffers=True , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
| 295 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ConditionalDetrFeatureExtractor"""]
lowerCamelCase__ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> Union[str, Any]:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
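# Each beta_t comes from the ratio of consecutive alpha-bars: beta_t = 1 - alpha_bar(t2) / alpha_bar(t1), capped at max_beta.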
class HeunDiscreteScheduler( SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self : Union[str, Any] , num_train_timesteps: int = 1_000 , beta_start: float = 0.0_0_0_8_5 , beta_end: float = 0.0_1_2 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='exp' )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep( self : str , timestep , schedule_timesteps=None ):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self : Tuple , sample: torch.FloatTensor , timestep: Union[float, torch.FloatTensor] , ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self : List[str] , num_inference_steps: int , device: Union[str, torch.device] = None , num_train_timesteps: Optional[int] = None , ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('mps' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t( self : Any , sigma , log_sigmas ):
        '''simple docstring'''
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
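    # Standard k-diffusion trick: invert sigma(t) by piecewise-linear interpolation in log-sigma space.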
    def _convert_to_karras( self : Tuple , in_sigmas: torch.FloatTensor , num_inference_steps ):
        '''simple docstring'''
        sigma_min : float = in_sigmas[-1].item()
        sigma_max : float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
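    # Karras et al. (2022): interpolate sigma**(1/rho) linearly from sigma_max down to sigma_min, then raise back to the rho-th power.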
@property
    def state_in_first_order( self : Any ):
'''simple docstring'''
return self.dt is None
    def step( self : List[str] , model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self : int , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ):
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps | 307 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
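# Mirrors fairseq's weight tying: the LM head is a bias-free linear layer sharing the embedding matrix.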
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path ):
    mam_aaa = torch.load(checkpoint_path , map_location="cpu" )
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 11 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,):
__A = {} if sp_model_kwargs is None else sp_model_kwargs
__A = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
__A = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A = "<|endoftext|>" if eos_token is None else eos_token
__A = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A = unk_token if pad_token is None else pad_token
__A = eos_token if bos_token is None else bos_token
else:
__A = "<pad>" if pad_token is None else pad_token
__A = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = do_lower_case
__A = remove_space
__A = keep_accents
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
# fmt : off
__A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__A = re.compile(
f'''[{''.join(map(A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]''' )
def __getstate__( self : Optional[int] ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Optional[Any] ,A : Union[str, Any] ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase_ ( self : List[str] ):
return len(self.sp_model )
def UpperCamelCase_ ( self : int ,A : str ):
__A = self.non_printing_characters_re.sub("" ,A )
# Normalize whitespaces
__A = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
__A = unicodedata.normalize("NFC" ,A )
return text
def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ):
__A = self.preprocess_text(A )
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.PieceToId(A )
def UpperCamelCase_ ( self : Dict ,A : int ):
return self.sp_model.IdToPiece(A )
@staticmethod
def UpperCamelCase_ ( A : str ):
return out_string
def UpperCamelCase_ ( self : str ,A : List[str] ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string
def UpperCamelCase_ ( self : str ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ):
if isinstance(A ,A ):
__A = self.preprocess_text(A )
__A = self.sp_model.encode(A )
else:
__A = [self.preprocess_text(A ) for t in text]
__A = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
__A = torch.tensor(A )
return token_ids
def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ):
return self.sp_model.decode(A )
def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ):
__A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__A = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=A )
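

# Round-trip sketch using the fast SentencePiece paths above; assumes the
# AI-Sweden checkpoint is downloadable and the `sentencepiece` extra is installed.
def _tokenizer_demo() -> None:
    tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    ids = tok.encode_fast("Träd är fina")  # plain list of ids, after preprocess_text
    print(tok.decode_fast(ids))
    print(tok.encode_fast("Träd är fina", return_tensors="pt").shape)  # torch tensor path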
| 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
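

# The _LazyModule indirection defers the heavy submodule imports: registering the
# package is cheap, and the first attribute access triggers the real import
# (raising the same optional-dependency errors guarded above).
def _lazy_demo() -> None:
    import importlib

    deit = importlib.import_module("transformers.models.deit")
    print(type(deit).__name__)       # _LazyModule until an attribute is touched
    print(deit.DeiTConfig.__name__)  # this line actually imports configuration_deit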
| 62 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Wraps a ViT image processor with character, BPE and WordPiece tokenizers for MGP-STR decoding."""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decodes char/bpe/wp logits jointly and keeps, per example, the head with the highest confidence."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
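

# End-to-end sketch for the processor; assumes network access to the public
# checkpoint, and uses a blank image, so the decoded text is meaningless.
def _mgp_str_demo() -> None:
    import torch
    from PIL import Image
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

    pixel_values = processor(images=Image.new("RGB", (128, 32)), return_tensors="pt").pixel_values
    with torch.no_grad():
        logits = model(pixel_values).logits  # (char, bpe, wp) logits tuple
    print(processor.batch_decode(logits)["generated_text"])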
| 62 | 1 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive-search solution to the rod-cutting problem. Runtime: O(2^n)."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Dynamic-programming solution (memoized recursion) to the rod-cutting problem."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev)
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative dynamic-programming solution to the rod-cutting problem."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Basic checks on the arguments to the rod-cutting algorithms."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
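

# Worked example (CLRS 15.1): a rod of length 4 with prices [1, 5, 8, 9] is best
# cut into two pieces of length 2, for a revenue of 5 + 5 = 10.
def _rod_cutting_example() -> None:
    prices = [1, 5, 8, 9]
    assert bottom_up_cut_rod(4, prices) == 10
    assert top_down_cut_rod(4, prices) == naive_cut_rod_recursive(4, prices) == 10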
| 158 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compares a library version to a requirement using a given operation (like ">", "<", "==")."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compares the currently installed PyTorch version to a reference with an operation."""
    return compare_versions(torch_version, operation, version)
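

# Typical gating patterns built on the helpers above; the version strings are
# arbitrary examples, not project requirements.
def _version_gate_demo() -> None:
    if is_torch_version(">=", "1.12.0"):
        print("torch is recent enough for the fast path")
    print(compare_versions("numpy", ">=", "1.21.0"))  # works for any installed package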
| 335 | 0 |
"""simple docstring"""
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal) -> str:
    """Take an integer value and return its hexadecimal representation as a string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
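
    # Spot checks consistent with the divmod loop above.
    assert decimal_to_hexadecimal(255) == "0xff"
    assert decimal_to_hexadecimal(16) == "0x10"
    assert decimal_to_hexadecimal(-42) == "-0x2a"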
| 226 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
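

# How a shared pipeline test consumes these sets: declare the full parameter set,
# then subtract anything a specific pipeline does not accept. Only the frozenset
# arithmetic is shown here; the test mixin itself lives in diffusers' test suite.
params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
assert "prompt" in params and "prompt" in batch_params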
| 226 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 264 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compares two initializers for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers (shared weights) from an ONNX model and saves
    an optimized copy next to the input file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
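

# Usage sketch: deduplicate shared weights in an exported model. "model.onnx" is
# a placeholder path; the optimized copy is written next to the input file.
def _dedup_demo() -> None:
    optimized_path = remove_dup_initializers("model.onnx")
    print("deduplicated model written to", optimized_path)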
| 324 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """GLPN-style image processor: size-divisor resizing plus 1/255 rescaling."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
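

# Minimal preprocessing sketch: height and width are floored to multiples of
# size_divisor and the batch comes back channels-first.
def _image_processor_demo() -> None:
    processor = GLPNImageProcessor(size_divisor=32)
    image = np.random.randint(0, 256, (65, 97, 3), dtype=np.uint8)
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 64, 96)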
| 324 | 1 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Returns a list of the first ``length`` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 185 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Solves PV = nRT for the pressure P, in pascals."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Solves PV = nRT for the volume V, in cubic metres."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
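
    # Worked example of PV = nRT: 1 mol at 300 K in 1 L (0.001 m^3) is ~2.49e6 Pa,
    # and the two helpers invert each other.
    pressure = pressure_of_gas_system(1.0, 300.0, 0.001)
    assert abs(volume_of_gas_system(1.0, 300.0, pressure) - 0.001) < 1e-12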
| 185 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 362 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
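

# Direct call equivalent to the CLI above; every path here is a placeholder.
# Note the tokenizer vocab is expected next to a checkpoint ending in "model.ckpt".
def _tapas_conversion_demo() -> None:
    convert_tf_checkpoint_to_pytorch(
        task="WTQ",
        reset_position_index_per_cell=True,
        tf_checkpoint_path="tapas_wtq/model.ckpt",
        tapas_config_file="tapas_wtq/bert_config.json",
        pytorch_dump_path="tapas_wtq_pytorch",
    )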
| 245 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _snake_case :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any=13 ,SCREAMING_SNAKE_CASE__ : List[Any]=10 ,SCREAMING_SNAKE_CASE__ : Tuple=3 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : List[Any]=32 ,SCREAMING_SNAKE_CASE__ : List[Any]=5 ,SCREAMING_SNAKE_CASE__ : Optional[int]=4 ,SCREAMING_SNAKE_CASE__ : str=37 ,SCREAMING_SNAKE_CASE__ : List[str]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=10 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.9 ,SCREAMING_SNAKE_CASE__ : int=None ,):
SCREAMING_SNAKE_CASE:List[str] = parent
SCREAMING_SNAKE_CASE:Tuple = batch_size
SCREAMING_SNAKE_CASE:Tuple = image_size
SCREAMING_SNAKE_CASE:Any = num_channels
SCREAMING_SNAKE_CASE:Tuple = patch_size
SCREAMING_SNAKE_CASE:Union[str, Any] = tubelet_size
SCREAMING_SNAKE_CASE:str = num_frames
SCREAMING_SNAKE_CASE:Any = is_training
SCREAMING_SNAKE_CASE:Tuple = use_labels
SCREAMING_SNAKE_CASE:List[Any] = hidden_size
SCREAMING_SNAKE_CASE:int = num_hidden_layers
SCREAMING_SNAKE_CASE:Tuple = num_attention_heads
SCREAMING_SNAKE_CASE:int = intermediate_size
SCREAMING_SNAKE_CASE:Optional[int] = hidden_act
SCREAMING_SNAKE_CASE:Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE:Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE:int = mask_ratio
SCREAMING_SNAKE_CASE:Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE:List[str] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE:Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE:str = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # `is_decoder=False` is restored by assumption; the garbled source only shows a
        # placeholder for this argument.
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
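        # Shape check spelled out (added): with the defaults, mask.sum() counts 22 masked
        # patches, and each one is reconstructed as 3 * tubelet_size * patch_size**2 =
        # 3 * 2 * 4 = 24 pixel values, so logits have shape (batch_size, 22, 24).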
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 139 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 250 | 0 |
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube, via a floating-point cube root."""
    val = n ** (1 / 3)
    return (val * val * val) == n
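
# A minimal alternative sketch (added, not part of the original entry): the float
# cube root above can misclassify large perfect cubes because of rounding error,
# so an exact check can round the root and verify by cubing integers.
def perfect_cube_exact(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))
    return root**3 == abs(n)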
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 290 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640,
        num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1,
        initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
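
# Geometry of the hidden-state test above, spelled out (added): with image_size=32,
# the five reported feature maps have spatial sizes 16, 8, 4, 2 and 1, i.e.
# image_size // 2**stage, and the final divisor of 64, halved, recovers the
# configured output_stride of 32.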
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 290 | 1 |
def is_contains_unique_chars(input_str: str) -> bool:
    """Check whether all characters in input_str are unique, using a bitmap over code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
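
# Quick illustration (added): each character claims one bit indexed by its code point.
assert is_contains_unique_chars("abc") is True  # bits 97, 98 and 99 are each set once
assert is_contains_unique_chars("aba") is False  # bit 97 is already set at the second "a"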
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
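
# Example (added): character-level bigrams slide one position at a time, so
# create_ngram("hello", 2) returns ["he", "el", "ll", "lo"].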
if __name__ == "__main__":
from doctest import testmod
testmod()
| 284 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('gpt2')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('gpt2')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-tokenizer')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        trie.data
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])
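        # Note on the split above (added commentary): the trie prefers the longest
        # match, which is why "extra_id_100" is emitted as one token rather than
        # "extra_id_1" followed by "00".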
    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even with inconsistent offsets, cut_text should output correct string parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
| 350 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Find the maximum of nums[left:right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
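
# Worked example (added): find_max([3, 9, 4, 7], 0, 3) splits at mid=1 into
# find_max(..., 0, 1) -> 9 and find_max(..., 2, 3) -> 7, returning 9. The recursion
# performs O(n) comparisons across O(log n) levels.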
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 0 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''') from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
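
# Sanity check (added): the first six primes are 2, 3, 5, 7, 11, 13, so solution(6)
# returns 13; the default nth=10001 matches Project Euler problem 7.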
if __name__ == "__main__":
print(F"{solution() = }")
| 288 |
'''simple docstring'''
ROMAN = [
    (1000, '''M'''),
    (900, '''CM'''),
    (500, '''D'''),
    (400, '''CD'''),
    (100, '''C'''),
    (90, '''XC'''),
    (50, '''L'''),
    (40, '''XL'''),
    (10, '''X'''),
    (9, '''IX'''),
    (5, '''V'''),
    (4, '''IV'''),
    (1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 112 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 110 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # `is_decoder=False` is restored by assumption; the garbled source only shows a
        # placeholder for this argument.
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
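
        # Background for the assertions above (added commentary): linear RoPE scaling
        # divides every position index by `factor` before the rotary angles are
        # computed, so even short inputs diverge from the unscaled model; dynamic NTK
        # scaling only enlarges the rotary base once the input exceeds
        # max_position_embeddings, so short inputs still match.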
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test')
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip('Model is currently gated')
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 110 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a__ ( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = JukeboxTokenizer
__lowerCamelCase = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
import torch
A__ = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
A__ = tokenizer(**self.metas )["input_ids"]
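        # Jukebox conditions three priors: the tokenizer returns one token sequence per level, and only the top level carries the full lyrics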
# fmt: off
A__ = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
A__ = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
A__ = tokenizer(**self.metas )["input_ids"]
# fmt: off
A__ = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 68 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> List[str]:
'''simple docstring'''
A__ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A__ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
A__ = F'{src_lang}-{tgt_lang}'
A__ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" )
print(F'Generating {path}' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
# make sure we are under the root of the project
lowerCAmelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCAmelCase__ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = model_name.split("""-""")
lowerCAmelCase__ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 68 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = KandinskyVaaControlnetPipeline
__snake_case = ['image_embeds', 'negative_image_embeds', 'hint']
__snake_case = ['image_embeds', 'negative_image_embeds', 'hint']
__snake_case = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__snake_case = False
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 100
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = {
'''in_channels''': 8,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase_ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.dummy_unet
lowerCAmelCase_ = self.dummy_movq
lowerCAmelCase_ = DDIMScheduler(
num_train_timesteps=1000, beta_schedule='''linear''', beta_start=0.00_085, beta_end=0.012, clip_sample=UpperCamelCase__, set_alpha_to_one=UpperCamelCase__, steps_offset=1, prediction_type='''epsilon''', thresholding=UpperCamelCase__, )
lowerCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=0 ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create hint
lowerCAmelCase_ = floats_tensor((1, 3, 64, 64), rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith('''mps''' ):
lowerCAmelCase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowerCAmelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCAmelCase_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**UpperCamelCase__ )
lowerCAmelCase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ), return_dict=UpperCamelCase__, )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
lowerCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowerCAmelCase_ = torch.from_numpy(np.array(UpperCamelCase__ ) ).float() / 255.0
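        # reorder the hint from HWC to CHW and add a batch dimension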
lowerCAmelCase_ = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCAmelCase_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
lowerCAmelCase_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.floataa )
lowerCAmelCase_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = '''A robot, 4k photo'''
lowerCAmelCase_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCAmelCase_ , lowerCAmelCase_ = pipe_prior(
UpperCamelCase__, generator=UpperCamelCase__, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
lowerCAmelCase_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCAmelCase_ = pipeline(
image_embeds=UpperCamelCase__, negative_image_embeds=UpperCamelCase__, hint=UpperCamelCase__, generator=UpperCamelCase__, num_inference_steps=100, output_type='''np''', )
lowerCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__, UpperCamelCase__ )
| 167 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase_ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowerCAmelCase_ = dict(zip(UpperCamelCase__, range(len(UpperCamelCase__ ) ) ) )
lowerCAmelCase_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
lowerCAmelCase_ = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
lowerCAmelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase_ = os.path.join(self.tmpdirname, UpperCamelCase__ )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase__, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
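        # np.moveaxis turns the channels-first (C, H, W) arrays into channels-last so PIL can read them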
lowerCAmelCase_ = [Image.fromarray(np.moveaxis(UpperCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=UpperCamelCase__ )
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
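        # reload with overridden special tokens and image processor kwargs and check that they take effect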
lowerCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowerCAmelCase_ = self.get_image_processor(do_normalize=UpperCamelCase__, padding_value=1.0 )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=UpperCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''np''' )
lowerCAmelCase_ = processor(images=UpperCamelCase__, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = '''lower newer'''
lowerCAmelCase_ = processor(text=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = '''lower newer'''
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=UpperCamelCase__, images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(images=UpperCamelCase__, visual_prompt=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ = processor.batch_decode(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
| 167 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a :Optional[int] = logging.get_logger(__name__)
a :Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a :Any = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
a :Optional[int] = {
"junnyu/roformer_chinese_small": 1_536,
"junnyu/roformer_chinese_base": 1_536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
a :Dict = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :str = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE :Any = RoFormerTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a=True , _a=None , **_a , ) -> str:
"""simple docstring"""
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowerCAmelCase__ , pre_tok_state.pop("""type""" ) )
SCREAMING_SNAKE_CASE__ : str = do_lower_case
SCREAMING_SNAKE_CASE__ : Any = strip_accents
SCREAMING_SNAKE_CASE__ : Any = pre_tok_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Any = do_lower_case
def __getstate__( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
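        # the custom Jieba pre-tokenizer wraps a Python callable that cannot be pickled, so fall back to BertPreTokenizer for serialization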
SCREAMING_SNAKE_CASE__ : Dict = BertPreTokenizer()
return state
def __setstate__( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = d
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dict__["_tokenizer"].get_vocab()
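        # rebuild the custom Jieba pre-tokenizer from the restored tokenizer vocabulary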
SCREAMING_SNAKE_CASE__ : int = PreTokenizer.custom(JiebaPreTokenizer(lowerCAmelCase__ ) )
def _a ( self , _a , _a=None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def _a ( self , _a , _a=None , _a=None , _a=False , **_a , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BertPreTokenizer()
return super().save_pretrained(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
| 132 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__a = datasets.utils.logging.get_logger(__name__)
__a = ['names', 'prefix']
__a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
__a = ['encoding_errors', 'on_bad_lines']
__a = ['date_format']
@dataclass
class A__ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase_ : str = ","
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer"
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[Union[int, List[int]]] = None
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Optional[Union[str, List[str]]] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = "."
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = '"'
UpperCamelCase_ : int = 0
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : int = 0
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : int = 1_00_00
UpperCamelCase_ : Optional[datasets.Features] = None
UpperCamelCase_ : Optional[str] = "strict"
UpperCamelCase_ : Literal["error", "warn", "skip"] = "error"
UpperCamelCase_ : Optional[str] = None
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
if self.delimiter is not None:
_UpperCAmelCase : List[Any] = self.delimiter
if self.column_names is not None:
_UpperCAmelCase : Union[str, Any] = self.column_names
@property
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Any = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value,
        # and some others are deprecated, so we also skip them when they equal their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCamelCase_ : Tuple = CsvConfig
def _lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_UpperCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
_UpperCAmelCase : Tuple = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = [files]
_UpperCAmelCase : Union[str, Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_UpperCAmelCase : Tuple = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Dict = [files]
_UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
_UpperCAmelCase : List[str] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCAmelCase : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCAmelCase : List[Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCAmelCase : Tuple = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
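            # iterator=True returns a TextFileReader that yields DataFrames of `chunksize` rows, keeping memory bounded on large files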
_UpperCAmelCase : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
_UpperCAmelCase : Union[str, Any] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise | 145 | 0 |
"""simple docstring"""
import qiskit
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->qiskit.result.counts.Counts:
'''simple docstring'''
a : Any = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
a : List[Any] = qiskit.QuantumCircuit(_snake_case , _snake_case )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
a : Optional[int] = qiskit.execute(_snake_case , _snake_case , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
a : Union[str, Any] = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
| 352 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if isinstance(_lowercase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __UpperCamelCase :
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
pass
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> str:
pass
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
a : Dict = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
a, a : Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = {"vision_model": vision_model, "text_model": text_model}
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
a, a : Dict = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = {"vision_model": vision_model, "text_model": text_model}
a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : Any = output[0]
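        # round-trip the model through save_pretrained/from_pretrained and check that the outputs still match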
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
a : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : List[Any] = after_output[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]:
a, a : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = {"vision_model": vision_model, "text_model": text_model}
a : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : Tuple = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
a : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[int] = to_atuple(vision_model.config.image_size )
a : Tuple = to_atuple(vision_model.config.patch_size )
a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a : str = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
a : List[Any] = inputs_dict
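        # cast the Flax/NumPy inputs to torch tensors for the PyTorch forward pass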
a : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a : int = pt_model(**lowerCAmelCase__ ).to_tuple()
a : Union[str, Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
a : Optional[int] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
a : int = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
a : List[str] = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def __a ( self ) -> Any:
a : List[Any] = self.prepare_config_and_inputs()
a : Tuple = config_inputs_dict.pop("vision_config" )
a : int = config_inputs_dict.pop("text_config" )
a : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __a ( self ) -> List[Any]:
a, a : Optional[int] = self.get_pretrained_model_and_inputs()
a : Optional[int] = model_a(**lowerCAmelCase__ )
a : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
a : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : str = model_a(**lowerCAmelCase__ )
a : Dict = after_outputs[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Any = 13
a : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Optional[Any] = random_attention_mask([batch_size, 4] )
a : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Dict = FlaxViTModel(lowerCAmelCase__ )
a : Dict = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> str:
a : Union[str, Any] = FlaxViTModelTester(self )
a : Dict = FlaxBertModelTester(self )
a : str = vit_model_tester.prepare_config_and_inputs()
a : Any = bert_model_tester.prepare_config_and_inputs()
a, a : Optional[int] = vision_config_and_inputs
a, a, a, a : Dict = text_config_and_inputs
        # bundle the configs and model inputs consumed by the shared dual encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Tuple = 13
a : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Tuple = random_attention_mask([batch_size, 4] )
a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[Any] = FlaxCLIPVisionModel(lowerCAmelCase__ )
a : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> List[Any]:
a : Tuple = FlaxCLIPVisionModelTester(self )
a : Union[str, Any] = FlaxBertModelTester(self )
a : Dict = clip_model_tester.prepare_config_and_inputs()
a : Optional[int] = bert_model_tester.prepare_config_and_inputs()
a, a : Dict = vision_config_and_inputs
a, a, a, a : Union[str, Any] = text_config_and_inputs
        # bundle the configs and model inputs consumed by the shared dual encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Dict:
a : str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
a : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" )
a : Optional[Any] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : List[str] = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
| 79 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments( BenchmarkArguments ):
    '''simple docstring'''
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__( self ,**kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("tpu_name" ,self.tpu_name )
        self.device_idx = kwargs.pop("device_idx" ,self.device_idx )
        self.eager_mode = kwargs.pop("eager_mode" ,self.eager_mode )
        self.use_xla = kwargs.pop("use_xla" ,self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx: int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode: bool = field(default=False , metadata={"help": "Benchmark models in eager model."} )
    use_xla: bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )
    @cached_property
    def _setup_tpu( self ):
        requires_backends(self ,["tf"] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy( self ):
        requires_backends(self ,["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,"GPU" )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] ,"GPU" ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def is_tpu( self ):
        requires_backends(self ,["tf"] )
        return self._setup_tpu is not None
    @property
    def strategy( self ):
        requires_backends(self ,["tf"] )
        return self._setup_strategy
    @property
    def gpu_list( self ):
        requires_backends(self ,["tf"] )
        return tf.config.list_physical_devices("GPU" )
    @property
    def n_gpu( self ):
        requires_backends(self ,["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self ):
        return self.n_gpu > 0
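def _demo_deprecated_flag_rewrite():
    # Illustrative sketch (not part of the original module): how the __init__
    # above rewrites deprecated "no_*" flags into their positive counterparts
    # before the parent dataclass sees the kwargs. Names here are hypothetical.
    kwargs = {"no_cuda": True, "eager_mode": False}
    for deprecated_arg in ["no_inference", "no_cuda", "no_tpu"]:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    assert kwargs == {"eager_mode": False, "cuda": False}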
| 15 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "patch_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head" )
    if "scratch" in name:
        name = name.replace("scratch" , "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt" )
    if "bn" in name:
        name = name.replace("bn" , "batch_norm" )
    if "head" in name:
        name = name.replace("head" , "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head" )
    return name
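# Worked example (editor's sketch): two representative timm/DPT keys fed through
# rename_key above:
#
#   "pretrained.model.blocks.0.attn.proj.weight"
#       -> "dpt.encoder.layer.0.attention.output.dense.weight"
#   "scratch.refinenet4.out_conv.weight"
#       -> "neck.fusion_stage.layers.0.projection.weight"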
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model to hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 234 | 0 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium(forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 ) -> bool:
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
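    # Extra sanity check (editor's sketch, not in the original): two equal and
    # opposite forces applied at the same point are trivially balanced, which
    # also exercises polar_force's degree-based angle handling.
    forces = array([polar_force(10, 0), polar_force(10, 180)])
    location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)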
| 368 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model ):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key(name ):
    if "encoder.model" in name:
        name = name.replace("encoder.model" , "encoder" )
    if "decoder.model" in name:
        name = name.replace("decoder.model" , "decoder" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if name.startswith("encoder" ):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj" , "attention.output.dense" )
        if "attn" in name and "mask" not in name:
            name = name.replace("attn" , "attention.self" )
        if "norm1" in name:
            name = name.replace("norm1" , "layernorm_before" )
        if "norm2" in name:
            name = name.replace("norm2" , "layernorm_after" )
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1" , "intermediate.dense" )
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2" , "output.dense" )
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
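def _demo_qkv_split():
    # Illustrative sketch (not part of the original script): timm stores the
    # query/key/value projections as one stacked matrix; convert_state_dict above
    # slices it into three equal blocks along dim 0. Sizes here are arbitrary.
    dim = 4
    qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([query, key, value]), qkv_weight)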
def convert_donut_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    # load original model
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents" )
    image = dataset["test"][0]["image"].convert("RGB" )
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}" , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported" )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors="pt" )[
        "input_ids"
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings, _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
        processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 230 | 0 |
'''simple docstring'''
def binary_recursive(decimal ):
    """simple docstring"""
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main(number ):
    """simple docstring"""
    number = str(number ).strip()
    if not number:
        raise ValueError("""No input value was provided""" )
    negative = '''-''' if number.startswith("""-""" ) else ''''''
    number = number.lstrip("""-""" )
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""" )
    return f"""{negative}0b{binary_recursive(int(number ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
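    # A couple of spot checks of the recursion above (editor's sketch):
    # 13 decimal is 1101 binary, and the sign is carried through unchanged.
    assert main("13") == "0b1101"
    assert main("-13") == "-0b1101"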
| 37 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp ):
'''simple docstring'''
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word ):
    '''simple docstring'''
    for char in word:
        char = ord(char )
        if not _is_chinese_char(char ):
            return 0
return 1
def get_chinese_word(tokens ):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
return word_list
def add_sub_symbol(bert_tokens ,chinese_word_set ):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start ,max_word_len )
            for i in range(l ,1 ,-1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 ,start + i ):
                        bert_word[j] = '''##''' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
return bert_word
def prepare_ref(lines ,ltp_tokenizer: LTP ,bert_tokenizer: BertTokenizer ):
    '''simple docstring'''
    ltp_res = []
    for i in range(0 ,len(lines ) ,100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 ,len(lines ) ,100 ):
        res = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=True ,truncation=True ,max_length=512 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res ,ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens ,chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
return ref_ids
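def _demo_add_sub_symbol():
    # Illustrative sketch (not part of the original script): non-initial
    # characters of a whole word found by LTP get the "##" prefix so that
    # whole word masking can treat the word as one unit.
    assert add_sub_symbol(["中", "国", "人"] , {"中国"} ) == ["中", "##国", "人"]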
def main(args ):
    '''simple docstring'''
    with open(args.file_name ,'''r''' ,encoding='''utf-8''' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp ) # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data ,ltp_tokenizer ,bert_tokenizer )
    with open(args.save_path ,'''w''' ,encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 179 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename , start_prompt , end_prompt ):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
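def _demo_find_text_in_file():
    # Minimal sketch (not part of the original utility) of the prompt-delimited
    # extraction above, using a throwaway file on disk.
    import tempfile

    content = "intro\n<!--start-->\ngenerated line\n<!--end-->\noutro\n"
    with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False, encoding="utf-8") as tmp:
        tmp.write(content)
    text, start, end, lines = _find_text_in_file(tmp.name, "<!--start-->", "<!--end-->")
    assert text == "generated line\n"
    os.unlink(tmp.name)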
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide , overwrite=False ):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                ' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
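# Runtime behaviour of the lazy structure above (editor's sketch): once
# sys.modules[__name__] is swapped for a _LazyModule at the bottom of this file,
# a line like
#
#   from transformers.models.poolformer import PoolFormerConfig
#
# only triggers the real submodule import on first attribute access.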
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 96 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
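    # Example invocation (editor's sketch; file paths are placeholders):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./rembert/model.ckpt \
    #       --rembert_config_file ./rembert/config.json \
    #       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin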
| 272 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000 ) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
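    # The search returns the least maximum side length whose cuboid count first
    # exceeds `limit`; a smaller bound exercises it cheaply (editor's sketch):
    print(f'{solution(2000) = }')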
| 272 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
    def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("Swin does not use inputs_embeds" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
    def test_retain_grad_hidden_states_attentions( self ):
'''simple docstring'''
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
    def test_model_from_pretrained( self ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
    def test_save_load_fast_init_from_base( self ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
    def test_save_load_fast_init_to_base( self ):
'''simple docstring'''
pass
    def test_model_outputs_equivalence( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                            "Tuple and dict output are not equal. Difference:"
                            F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                            F""" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"""
                            F""" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."""
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
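# What the backbone test below exercises (editor's sketch, illustrative; assumes
# a randomly initialized config rather than a released checkpoint):
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config)
#   feats = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   assert len(feats) == len(backbone.channels)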
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 350 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav , max_length , sample_rate = 1_6000 ):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
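def _demo_random_subsample():
    # Quick sketch (not part of the original example): a 2-second clip sampled at
    # 16 kHz cut down to at most 1 second keeps exactly sample_rate samples.
    wav = np.zeros(32_000)
    clip = random_subsample(wav, max_length=1.0, sample_rate=16_000)
    assert len(clip) == 16_000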
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(
        default=None , metadata={'help': 'A file containing the training audio paths and labels.'})
    eval_file: Optional[str] = field(
        default=None , metadata={'help': 'A file containing the validation audio paths and labels.'})
    train_split_name: str = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    eval_split_name: str = field(
        default='validation' , metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    audio_column_name: str = field(
        default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
    label_column_name: str = field(
        default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'help': 'Name or path of preprocessor config.'})
    freeze_feature_encoder: bool = field(
        default=True , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(
        default=True , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
    def __post_init__( self ):
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`." , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def a__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--label_column_name` to the correct text column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_A = feature_extractor.model_input_names[0]
def train_transforms(__lowercase ):
_A = []
for audio in batch[data_args.audio_column_name]:
_A = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowercase )
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowercase ):
_A = [audio["array"] for audio in batch[data_args.audio_column_name]]
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_A = raw_datasets["train"].features[data_args.label_column_name].names
_A , _A = {}, {}
for i, label in enumerate(__lowercase ):
_A = str(__lowercase )
_A = label
# Load the accuracy metric from the datasets package
_A = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowercase ):
_A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowercase , references=eval_pred.label_ids )
_A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
_A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics("eval" , __lowercase )
trainer.save_metrics("eval" , __lowercase )
# Write model card and (optionally) push to hub
_A = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main() | 163 | 0 |
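
`random_subsample`, called in `train_transforms` above, is a small helper defined earlier in the example script and not shown in this excerpt. A sketch consistent with how it is called here — the name and signature come from the call site; the body is an assumption:

import numpy as np
from random import randint


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample a chunk of `max_length` seconds from the input audio (assumed behavior)."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        # Clip is already short enough; use it as-is.
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]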
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Check whether a graph (given as an adjacency list) is bipartite, using DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        # Color vertex v with c, then color all unvisited neighbors with the opposite color.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The coloring is valid iff no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
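
The recursive DFS above recurses once per vertex and can hit Python's default recursion limit on deep graphs. For reference, an equivalent iterative BFS 2-coloring — a sketch, not part of the original module:

from collections import deque


def check_bipartite_bfs(graph: dict[int, list[int]]) -> bool:
    """Iterative BFS 2-coloring; returns True iff the graph is bipartite."""
    color = [-1] * len(graph)
    for start in range(len(graph)):
        if color[start] != -1:
            continue  # component already colored
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False  # edge inside one color class
    return True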
| 29 |
"""simple docstring"""
from __future__ import annotations
import math
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = str(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [n]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if len(str(_SCREAMING_SNAKE_CASE ) ) > 3:
if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ):
return False
return True
def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = 13
while len(_SCREAMING_SNAKE_CASE ) != count:
if validate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE )
if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ):
list_truncated_primes.append(_SCREAMING_SNAKE_CASE )
num += 2
return list_truncated_primes
def lowercase ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
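
Since Project Euler 37 establishes that exactly eleven truncatable primes exist, the implementation above can be sanity-checked against the known values:

assert compute_truncated_primes(11) == [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397]
assert solution() == 748317  # the sum of the eleven truncatable primes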
| 260 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {"""vocab_file""": """spiece.model"""}
UpperCAmelCase : int = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
UpperCAmelCase : str = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : int = VOCAB_FILES_NAMES
_lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
a__ : str ={} if sp_model_kwargs is None else sp_model_kwargs
a__ : Any =kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
a__ : int ="None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
a__ : Dict ="<|endoftext|>" if eos_token is None else eos_token
a__ : Any ="<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
a__ : Optional[int] =unk_token if pad_token is None else pad_token
a__ : List[str] =eos_token if bos_token is None else bos_token
else:
a__ : Optional[int] ="<pad>" if pad_token is None else pad_token
a__ : Any ="<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
a__ : List[str] =do_lower_case
a__ : Union[str, Any] =remove_space
a__ : Union[str, Any] =keep_accents
a__ : int =vocab_file
a__ : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# Used for whitespace normalization in input texts
# fmt : off
a__ : List[Any] ={" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
a__ : Optional[Any] =re.compile(
F'''[{"".join(map(lowerCAmelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]''' )
def __getstate__( self ) -> str:
'''simple docstring'''
a__ : Dict =self.__dict__.copy()
a__ : Optional[int] =None
return state
def __setstate__( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : str ={}
a__ : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _lowercase ( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
a__ : List[str] =self.non_printing_characters_re.sub("" , lowerCAmelCase__ )
# Normalize whitespaces
a__ : Optional[int] ="".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
a__ : Optional[Any] =unicodedata.normalize("NFC" , lowerCAmelCase__ )
return text
def _lowercase ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
a__ : Dict =self.preprocess_text(lowerCAmelCase__ )
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCAmelCase__ )
@staticmethod
def _lowercase ( lowerCAmelCase__ ) -> str:
'''simple docstring'''
return out_string
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
a__ : int =[]
a__ : List[Any] =""
a__ : Tuple =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
a__ : Tuple =True
a__ : Any =[]
else:
current_sub_tokens.append(lowerCAmelCase__ )
a__ : List[str] =False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string
def _lowercase ( self ) -> Dict[str, int]:
'''simple docstring'''
a__ : List[str] ={self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ : List[Any] =os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
a__ : List[str] =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Tuple =self.preprocess_text(lowerCAmelCase__ )
a__ : str =self.sp_model.encode(lowerCAmelCase__ )
else:
a__ : List[str] =[self.preprocess_text(lowerCAmelCase__ ) for t in text]
a__ : int =self.sp_model.encode(lowerCAmelCase__ )
if return_tensors is True or return_tensors == "pt":
a__ : Optional[int] =torch.tensor(lowerCAmelCase__ )
return token_ids
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
return self.sp_model.decode(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> List[int]:
'''simple docstring'''
a__ : Tuple =[F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
a__ : Tuple =(
F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(lowerCAmelCase__ ) + F'''{self.bos_token}Bot:'''
)
return self.encode(text=lowerCAmelCase__ )
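
A minimal usage sketch for this tokenizer, assuming the `AI-Sweden/gpt-sw3-126m` checkpoint from PRETRAINED_VOCAB_FILES_MAP is reachable and that `GPTSw3Tokenizer` is exported at the top level of `transformers`, as model tokenizers usually are:

from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer.encode_fast("Svenska är kul!", return_tensors="pt")  # 1-D tensor of token ids
print(tokenizer.decode_fast(ids.tolist()))  # round-trips back to (normalized) text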
| 350 |
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
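
For context, the denoising loop these tests exercise can be sketched as follows. Random tensors stand in for a real UNet and its latents; `PNDMScheduler.step` dispatches to `step_prk`/`step_plms` internally, which is what `full_loop` above does explicitly:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy sample
for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample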
| 148 | 0 |