import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            self.rem = last % 1  # fractional part, used to round half up
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
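# Illustrative sketch (appended for this edit, not part of the original file):
# the stretch loop above is histogram equalization, mapping each grey level k
# through the scaled CDF, s_k = (L - 1) * sum_{j <= k} p_r(j). The same mapping
# in vectorized NumPy, assuming `image` is a uint8 grayscale array:
def equalize_numpy(image: np.ndarray) -> np.ndarray:
    hist = np.bincount(image.ravel(), minlength=256)  # per-level pixel counts
    cdf = hist.cumsum() / hist.sum()  # cumulative distribution p_r
    lut = np.rint(cdf * 255).astype(np.uint8)  # s_k lookup table
    return lut[image]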
"""ConvBERT model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
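# Illustrative usage (a sketch; kept as a comment because this module lives inside
# `transformers` itself, where the class is exported as `transformers.ConvBertConfig`):
#
#     from transformers import ConvBertConfig, ConvBertModel
#
#     config = ConvBertConfig()      # defaults mirror the YituTech/conv-bert-base checkpoint
#     model = ConvBertModel(config)  # randomly initialized model with that configuration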
"""Fast tokenization class for Blenderbot."""

import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words, i.e. include the space before them.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
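# Illustrative usage (a sketch, kept as a comment since this file is part of the
# library): Blenderbot appends only the `</s>` EOS token to encoded inputs, per
# `build_inputs_with_special_tokens` above.
#
#     from transformers import BlenderbotTokenizerFast
#
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     ids = tokenizer("Sample input")["input_ids"]
#     assert ids[-1] == tokenizer.eos_token_id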
"""Processor class for MGP-STR."""

import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """
    Wraps an image processor and a character tokenizer, plus the GPT-2 BPE and BERT
    WordPiece tokenizers used by the auxiliary prediction heads of MGP-STR.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """
        Converts a tuple of (char_logits, bpe_logits, wp_logits) into decoded strings,
        keeping the highest-confidence decoding among the three heads for each example.
        """
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
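# Illustrative usage (a sketch; the checkpoint name and image path are assumptions,
# kept as a comment since this file is part of the library):
#
#     from PIL import Image
#     from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     image = Image.open("text_crop.png").convert("RGB")  # hypothetical crop of rendered text
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]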
"""Calculate the nth Proth number (https://en.wikipedia.org/wiki/Proth_number)."""

import math


def proth(number: int) -> int:
    """
    >>> proth(1)
    3
    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in doubling blocks; generate enough blocks to
        # cover index `number`.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
lowercase__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
lowercase__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
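# Illustrative invocation (a sketch; the dataset name and hyperparameters are
# assumptions, not taken from this file):
#
#     python run_image_classification.py \
#         --dataset_name beans \
#         --output_dir ./outputs/ \
#         --do_train --do_eval \
#         --learning_rate 2e-5 \
#         --num_train_epochs 5 \
#         --per_device_train_batch_size 8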
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [[] for _ in range(_UpperCamelCase )]
__lowerCAmelCase = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(_UpperCamelCase ) <= key:
return input_string
for position, character in enumerate(_UpperCamelCase ):
__lowerCAmelCase = position % (lowest * 2) # puts it in bounds
__lowerCAmelCase = min(_UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_UpperCamelCase )
__lowerCAmelCase = ["".join(_UpperCamelCase ) for row in temp_grid]
__lowerCAmelCase = "".join(_UpperCamelCase )
return output_string
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
__lowerCAmelCase = [[] for _ in range(_UpperCamelCase )] # generates template
for position in range(len(_UpperCamelCase ) ):
__lowerCAmelCase = position % (lowest * 2) # puts it in bounds
__lowerCAmelCase = min(_UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
__lowerCAmelCase = 0
for row in temp_grid: # fills in the characters
__lowerCAmelCase = input_string[counter : counter + len(_UpperCamelCase )]
grid.append(list(_UpperCamelCase ) )
counter += len(_UpperCamelCase )
__lowerCAmelCase = "" # reads as zigzag
for position in range(len(_UpperCamelCase ) ):
__lowerCAmelCase = position % (lowest * 2) # puts it in bounds
__lowerCAmelCase = min(_UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {}
for key_guess in range(1 , len(_UpperCamelCase ) ): # tries every key
__lowerCAmelCase = decrypt(_UpperCamelCase , _UpperCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
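# Illustrative round trip (appended for this edit; the ciphertext is the classic
# Wikipedia rail-fence example for key 3):
assert encrypt("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"
assert decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3) == "WEAREDISCOVEREDFLEEATONCE"
assert "WEAREDISCOVEREDFLEEATONCE" in bruteforce("WECRLTEERDSOEEFEAOCAIVDEN").values()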
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
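# Illustrative check (appended for this edit): rotations are counterclockwise;
# for example, a 90-degree rotation moves the right column to the top row.
assert rotate_90(make_matrix(2)) == [[2, 4], [1, 3]]
assert rotate_180(make_matrix(2)) == [[4, 3], [2, 1]]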
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" Mask2Former. Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
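# Illustrative usage (a sketch, as a comment since this file is part of the library):
#
#     from transformers import Mask2FormerConfig, Mask2FormerModel
#
#     config = Mask2FormerConfig()  # falls back to the default Swin backbone
#     assert config.backbone_config.model_type == "swin"
#     model = Mask2FormerModel(config)  # randomly initialized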
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
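# Illustrative note (a sketch, as a comment): `_LazyModule` keeps `import transformers`
# cheap; the classes listed in `_import_structure` are only imported on first access:
#
#     from transformers import XCLIPConfig  # resolves lazily through this module
#     config = XCLIPConfig()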
"""
A simple launcher script for TPU training.
"""

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
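# Illustrative invocation (the script name and trailing flags are hypothetical;
# the target script must expose an `_mp_fn(index)` entry point):
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5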
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIP tokenizer that lets one placeholder token expand to several learned tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
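# Illustrative usage (a sketch; the checkpoint and placeholder names are assumptions):
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding
#     ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)["input_ids"]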
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
"""
Project Euler problem 82: find the minimal path sum from the left column to the
right column of a matrix, moving up, down, and right.
"""

import os


def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum from the left column to the right column of the
    matrix stored in `filename`, moving up, down, and right.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # moving right is always possible
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # a downward pass allows arriving from above ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # ... and an upward pass allows arriving from below
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
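# Illustrative in-memory variant (appended for this edit): the same three-pass DP
# on the 5x5 example grid from Project Euler problem 82, whose minimal
# left-to-right path sum is 994.
def solution_from_matrix(matrix):
    rows = len(matrix)
    sums = [row[0] for row in matrix]  # best cost to reach each cell of column 0
    for j in range(1, len(matrix[0])):
        sums = [sums[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):  # allow arriving from above
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # allow arriving from below
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)


assert solution_from_matrix(
    [
        [131, 673, 234, 103, 18],
        [201, 96, 342, 965, 150],
        [630, 803, 746, 422, 111],
        [537, 699, 497, 121, 956],
        [805, 732, 524, 37, 331],
    ]
) == 994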
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class inheritance
        structure should be cleaned up.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}

        # Actually write the dummy split files
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, F'''{split}.{field}'''), 'w') as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(F'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieve(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieve(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 183 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times num must be replaced by the product of its digits
    before it becomes a single digit.
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of times num must be replaced by the sum of its digits
    before it becomes a single digit.
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
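    # Quick sanity checks worked by hand: 39 -> 27 -> 14 -> 4 (three steps)
    # and 199 -> 19 -> 10 -> 1 (three steps).
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3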
| 211 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
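
# Minimal usage sketch (assumes a local pyspark session is available;
# `streaming=False` materializes the DataFrame through `cache_dir` before
# loading it back as a regular datasets.Dataset):
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
    ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/spark_cache").read()
    print(ds)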
| 119 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
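    # For reference, the same lazy-import idea can be sketched with PEP 562
    # module-level __getattr__ (illustrative only; not the actual _LazyModule
    # implementation):
    #
    #     def __getattr__(name):
    #         for module_name, attrs in _import_structure.items():
    #             if name in attrs:
    #                 return getattr(importlib.import_module("." + module_name, __name__), name)
    #         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")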
| 119 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f'''{len(train_images)}. Received: {validation_size}.'''
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
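
# A minimal end-to-end sketch (assumes network access to the CVDF mirror above;
# the first call downloads the four MNIST archives into the given directory):
if __name__ == "__main__":
    data = read_data_sets("/tmp/mnist_data", one_hot=True)
    batch_images, batch_labels = data.train.next_batch(64)
    print(batch_images.shape, batch_labels.shape)  # (64, 784) (64, 10)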
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration class for a ViT MAE model."""

    model_type = 'vit_mae'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
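
# Quick instantiation sketch: with the defaults above an input image is split
# into (224 // 16) ** 2 = 196 patches, of which ~75% are masked in pre-training.
if __name__ == "__main__":
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches, int(num_patches * config.mask_ratio))  # 196 147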
| 334 | 0 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
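    # Usage sketch (assumes the `google/music-spectrogram-diffusion` checkpoint
    # and that `note_seq` is installed for MidiProcessor; illustrative only):
    #
    #     from diffusers import SpectrogramDiffusionPipeline
    #     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
    #     output = pipe(MidiProcessor()("some_midi_file.mid"))
    #     audio = output.audios[0]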
| 365 |
"""simple docstring"""
from math import factorial
def solution(num: int = 1_00) -> int:
    """Return the sum of the digits in num!."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
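    # Sanity check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27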
| 253 | 0 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = '永和服装饰品有限公司,今天天气非常好'
        output_text = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib with rjieba pre-tokenization
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib with rjieba pre-tokenization
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 78 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Check whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 1_0001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
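    # Cheap regression checks: the sixth prime is 13, and 91 = 7 * 13.
    assert solution(6) == 13
    assert is_prime(97) and not is_prime(91)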
| 90 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    format = try_infer_format_from_ext(args.input) if args.format == """infer""" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("""run""", help="""Run a pipeline through the CLI""")
        run_parser.add_argument("""--task""", choices=get_supported_tasks(), help="""Task to run""")
        run_parser.add_argument("""--input""", type=str, help="""Path to the file to use for inference""")
        run_parser.add_argument("""--output""", type=str, help="""Path to the file that will be used post to write results.""")
        run_parser.add_argument("""--model""", type=str, help="""Name or path to the model to instantiate.""")
        run_parser.add_argument("""--config""", type=str, help="""Name or path to the model's config to instantiate.""")
        run_parser.add_argument(
            """--tokenizer""", type=str, help="""Name of the tokenizer to use. (default: same as the model name)""")
        run_parser.add_argument(
            """--column""", type=str, help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""", )
        run_parser.add_argument(
            """--format""", type=str, default="""infer""", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="""Input format to read from""", )
        run_parser.add_argument(
            """--device""", type=int, default=-1, help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""", )
        run_parser.add_argument("""--overwrite""", action="""store_true""", help="""Allow overwriting the output file.""")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}')
        else:
            self._reader.save(outputs)
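
# Minimal wiring sketch (hypothetical `data.csv` with a `text` column; this
# mirrors what the `transformers-cli run` entry point does):
if __name__ == "__main__":
    from argparse import ArgumentParser

    cli = ArgumentParser("transformers-cli")
    RunCommand.register_subcommand(cli.add_subparsers())
    args = cli.parse_args(["run", "--task", "text-classification", "--model", "distilbert-base-uncased-finetuned-sst-2-english", "--input", "data.csv", "--column", "text", "--output", "out.csv"])
    args.func(args).run()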
| 351 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__A : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__A : Any = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__A : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="""https://github.com/krishnap25/mauve""", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                } ), codebase_urls=["""https://github.com/krishnap25/mauve"""], reference_urls=[
                """https://arxiv.org/abs/2102.01454""",
                """https://github.com/krishnap25/mauve""",
            ], )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
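
# Usage sketch (faiss and the `mauve-text` package must be installed; features
# are computed with gpt2-large by default, so a GPU id via `device_id` helps):
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(predictions=["hello there"], references=["hello there"])
#     print(out.mauve)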
| 49 | 0 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/transformers''')
    open_issues = repo.get_issues(state='''open''')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 6 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Base class for text model outputs that also contain a projection of the last hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r'''pooler''', r'''logit_scale''']
    _keys_to_ignore_on_load_missing = [r'''position_ids''', r'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )

        if self.has_pre_transformation:
            sequence_output = outputs['''hidden_states'''][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
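
# Forward-pass sketch with a tiny, randomly initialized configuration
# (illustrative only; real checkpoints ship their own sizes):
if __name__ == "__main__":
    config = RobertaSeriesConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128, project_dim=32)
    model = RobertaSeriesModelWithTransformation(config)
    ids = torch.randint(0, config.vocab_size, (1, 16))
    out = model(input_ids=ids, attention_mask=torch.ones_like(ids))
    print(out.projection_state.shape)  # torch.Size([1, 16, 32])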
| 156 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 181 | 0 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__a = float("nan")
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : str , snake_case_ : str ):
snake_case__ : Union[str, Any] = sys.stdout
snake_case__ : int = open(snake_case_ , """a""" )
def __getattr__( self : Tuple , snake_case_ : Optional[Any] ):
return getattr(self.stdout , snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : List[str] ):
self.stdout.write(snake_case_ )
# strip tqdm codes
self.file.write(re.sub(r"""^.*\r""" , """""" , snake_case_ , 0 , re.M ) )
def __snake_case( _lowerCAmelCase=80 , _lowerCAmelCase=False ) -> List[str]:
snake_case__ : Union[str, Any] = []
# deal with critical env vars
snake_case__ : Optional[int] = ["""CUDA_VISIBLE_DEVICES"""]
for key in env_keys:
snake_case__ : List[str] = os.environ.get(_lowerCAmelCase , _lowerCAmelCase )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
snake_case__ : Optional[Any] = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
cmd.append(_lowerCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
snake_case__ : List[Any] = []
snake_case__ : Any = """"""
while len(_lowerCAmelCase ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowerCAmelCase )
snake_case__ : int = """"""
return "\\\n".join(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
# unwrap multi-line input
snake_case__ : Dict = re.sub(r"""[\\\n]+""" , """ """ , args.base_cmd )
# remove --output_dir if any and set our own
snake_case__ : Any = re.sub("""--output_dir\s+[^\s]+""" , """""" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
snake_case__ : str = re.sub("""--overwrite_output_dir\s+""" , """""" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
snake_case__ : Union[str, Any] = subprocess.run(_lowerCAmelCase , capture_output=_lowerCAmelCase , text=_lowerCAmelCase )
if verbose:
print("""STDOUT""" , result.stdout )
print("""STDERR""" , result.stderr )
# save the streams
snake_case__ : Dict = variation.replace(""" """ , """-""" )
with open(Path(_lowerCAmelCase ) / f"log.{prefix}.stdout.txt" , """w""" ) as f:
f.write(result.stdout )
with open(Path(_lowerCAmelCase ) / f"log.{prefix}.stderr.txt" , """w""" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("""failed""" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , """r""" , encoding="""utf-8""" ) as f:
snake_case__ : Dict = json.load(_lowerCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict:
snake_case__ : Any = []
snake_case__ : int = []
snake_case__ : Tuple = f"{id}: {variation:<{longest_variation_len}}"
snake_case__ : Optional[Any] = f"{preamble}: "
snake_case__ : Optional[Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowerCAmelCase ) , desc=_lowerCAmelCase , leave=_lowerCAmelCase ):
snake_case__ : Dict = process_run_single(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
snake_case__ : Tuple = single_run_metrics[target_metric_key]
if not math.isnan(_lowerCAmelCase ):
metrics.append(_lowerCAmelCase )
results.append(_lowerCAmelCase )
outcome += "✓"
else:
outcome += "✘"
snake_case__ : str = f"\33[2K\r{outcome}"
if len(_lowerCAmelCase ) > 0:
snake_case__ : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
snake_case__ : Any = round(mean_metrics[target_metric_key] , 2 )
snake_case__ : Optional[Any] = f"{outcome} {mean_target}"
if len(_lowerCAmelCase ) > 1:
results_str += f" {tuple(round(_lowerCAmelCase , 2 ) for x in results )}"
print(_lowerCAmelCase )
snake_case__ : Optional[Any] = variation
return mean_metrics
else:
print(_lowerCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __snake_case( ) -> Any:
snake_case__ : int = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
snake_case__ : List[Any] = pd.DataFrame(_lowerCAmelCase )
snake_case__ : Union[str, Any] = """variation"""
snake_case__ : int = """diff_%"""
snake_case__ : List[Any] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
snake_case__ : Tuple = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowerCAmelCase ):
# as a fallback, use the minimal value as the sentinel
snake_case__ : Optional[int] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowerCAmelCase ):
snake_case__ : Optional[Any] = df.apply(
lambda _lowerCAmelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="""columns""" , )
# re-order columns
snake_case__ : str = [variation_key, target_metric_key, diff_key, *report_metric_keys]
snake_case__ : int = df.reindex(_lowerCAmelCase , axis="""columns""" ) # reorder cols
# capitalize
snake_case__ : Any = df.rename(str.capitalize , axis="""columns""" )
# make the cols as narrow as possible
snake_case__ : Optional[Any] = df.rename(lambda _lowerCAmelCase : c.replace("""_""" , """<br>""" ) , axis="""columns""" )
snake_case__ : Optional[Any] = df.rename(lambda _lowerCAmelCase : c.replace("""_""" , """\n""" ) , axis="""columns""" )
snake_case__ : Optional[int] = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowerCAmelCase , floatfmt=""".2f""" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowerCAmelCase , floatfmt=""".2f""" )]
print("""\n\n""".join(_lowerCAmelCase ) )
def __snake_case( ) -> Any:
snake_case__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--base-cmd""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""Base cmd""" , )
parser.add_argument(
"""--variations""" , default=_lowerCAmelCase , type=_lowerCAmelCase , nargs="""+""" , required=_lowerCAmelCase , help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" , )
parser.add_argument(
"""--base-variation""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" , )
parser.add_argument(
"""--target-metric-key""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" , )
parser.add_argument(
"""--report-metric-keys""" , default="""""" , type=_lowerCAmelCase , help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples""" , )
parser.add_argument(
"""--repeat-times""" , default=1 , type=_lowerCAmelCase , help="""How many times to re-run each variation - an average will be reported""" , )
parser.add_argument(
"""--output_dir""" , default="""output_benchmark""" , type=_lowerCAmelCase , help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" , )
parser.add_argument(
"""--verbose""" , default=_lowerCAmelCase , action="""store_true""" , help="""Whether to show the outputs of each run or just the benchmark progress""" , )
snake_case__ : int = parser.parse_args()
snake_case__ : Dict = args.output_dir
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
snake_case__ : Dict = get_base_command(_lowerCAmelCase , _lowerCAmelCase )
# split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r"""\|""" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(""" """.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
# split wanted keys
    report_metric_keys = args.report_metric_keys.split()
# capture prints into a log file for convenience
snake_case__ : str = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
    snake_case__ : Optional[int] = Tee(report_fn )
    print(f"\n*** Running {len(variations )} benchmarks:" )
    print(f"Base command: {' '.join(base_cmd )}" )
snake_case__ : Any = """variation"""
snake_case__ : str = []
for id, variation in enumerate(tqdm(_lowerCAmelCase , desc="""Total completion: """ , leave=_lowerCAmelCase ) ):
snake_case__ : str = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.repeat_times , _lowerCAmelCase , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 35 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
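# color a point by its normalized escape count: a hue for points outside the set, black for points inside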
def get_color_coded_rgb( distance ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    """simple docstring"""
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 104 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
'''simple docstring'''
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=128 , _A=32 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
    def prepare_config_and_inputs( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NezhaModel(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A )
__SCREAMING_SNAKE_CASE = model(_A , token_type_ids=_A )
__SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = NezhaModel(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , )
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , next_sentence_label=_A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , _A , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
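        # repeat input_ids, token_type_ids and the attention mask once per choice so every option is scored in one forward pass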
__SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : int = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : Optional[int] = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : List[Any] = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
def _A ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_A )
    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def _A ( self ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
def _A ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'bert.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'bert.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self ):
'''simple docstring'''
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
@slow
def _A ( self ):
'''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21_128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 118 |
import os
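# minimal path sum across a comma-separated matrix, moving right, up, or down
# (Project Euler 82 style): dynamic programming processes one column at a time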
def __lowercase ( a__ = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file:
__SCREAMING_SNAKE_CASE = [
[int(a__ ) for element in line.split(',' )]
for line in input_file.readlines()
]
__SCREAMING_SNAKE_CASE = len(a__ )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
__SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )]
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1 , a__ ):
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a__ ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 118 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Union[str, Any] = 99
UpperCAmelCase_ : Tuple = 384
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Any = 4
UpperCAmelCase_ : int = 37
UpperCAmelCase_ : Dict = "gelu"
UpperCAmelCase_ : Optional[int] = 0.1
UpperCAmelCase_ : Tuple = 0.1
UpperCAmelCase_ : Optional[Any] = 512
UpperCAmelCase_ : Dict = 16
UpperCAmelCase_ : int = 2
UpperCAmelCase_ : Union[str, Any] = 0.02
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Optional[Any] = 4
UpperCAmelCase_ : Any = 128
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = 9
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = None
    def prepare_config_and_inputs( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : str = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = TFConvBertModel(config=lowercase_ )
UpperCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : List[str] = [input_ids, input_mask]
UpperCAmelCase_ : Union[str, Any] = model(lowercase_ )
UpperCAmelCase_ : str = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TFConvBertForMaskedLM(config=lowercase_ )
UpperCAmelCase_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ : Tuple = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : List[str] = TFConvBertForSequenceClassification(config=lowercase_ )
UpperCAmelCase_ : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.num_choices
UpperCAmelCase_ : Tuple = TFConvBertForMultipleChoice(config=lowercase_ )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Tuple = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Optional[Any] = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
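        # tile each input once per choice: multiple-choice models expect inputs of shape (batch_size, num_choices, seq_len)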
UpperCAmelCase_ : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.num_labels
UpperCAmelCase_ : int = TFConvBertForTokenClassification(config=lowercase_ )
UpperCAmelCase_ : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ : Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = TFConvBertForQuestionAnswering(config=lowercase_ )
UpperCAmelCase_ : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Optional[int] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Dict = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , "use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , "saved_model" , "1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , "key_length" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class A_ (unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCAmelCase_ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ : str = model(lowercase_ )[0]
UpperCAmelCase_ : Any = [1, 6, 768]
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase_ : str = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 61 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
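# copy weights from the old ProphetNet checkpoint layout into the new module structure, key by key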
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path ,pytorch_dump_folder_path ) -> Optional[int]:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet ,loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path ,output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet ,loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path ,output_loading_info=True )
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
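    # every key the new model could not load is resolved attribute-by-attribute through the mapping above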
for key in loading_info["missing_keys"]:
        attributes = key.split('.' )
if attributes[0] == "lm_head":
__lowerCamelCase : Dict = prophet
__lowerCamelCase : List[Any] = prophet_old
else:
__lowerCamelCase : Any = prophet.prophetnet
__lowerCamelCase : Any = prophet_old.model
__lowerCamelCase : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model ,old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model ,attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCamelCase : List[Any] = old_model.weight
logger.info(F'{attribute} is initialized.' )
__lowerCamelCase : List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCamelCase : List[Any] = old_model.bias
logger.info(F'{attribute} is initialized' )
__lowerCamelCase : Dict = True
break
elif attribute in special_keys and hasattr(_lowerCAmelCase ,'in_proj_weight' ):
__lowerCamelCase : Optional[Any] = old_model.in_proj_weight.shape[0] // 3
__lowerCamelCase : Optional[Any] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase : str = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase : Optional[int] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowerCamelCase : Dict = True
break
if attribute.isdigit():
__lowerCamelCase : List[str] = model[int(_lowerCAmelCase )]
__lowerCamelCase : Union[str, Any] = old_model[int(_lowerCAmelCase )]
else:
__lowerCamelCase : Union[str, Any] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if old_attribute == "":
__lowerCamelCase : str = old_model
else:
if not hasattr(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
__lowerCamelCase : str = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 208 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_xlm_roberta_base( self ) -> str:
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
@slow
    def test_xlm_roberta_large( self ) -> Optional[int]:
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 241 |
"""simple docstring"""
def a__ ( sentence , ngram_size ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 241 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
def __init__( self , UpperCamelCase_ , ) -> str:
__lowercase : Optional[int] = parent
__lowercase : Dict = 13
__lowercase : str = 7
__lowercase : List[str] = 30
__lowercase : Any = self.seq_length + self.mem_len
__lowercase : Optional[Any] = 15
__lowercase : Dict = True
__lowercase : str = True
__lowercase : List[str] = 99
__lowercase : Optional[int] = [10, 50, 80]
__lowercase : List[str] = 32
__lowercase : Optional[int] = 32
__lowercase : Any = 4
__lowercase : Tuple = 8
__lowercase : Any = 1_28
__lowercase : Dict = 2
__lowercase : Optional[Any] = 2
__lowercase : List[str] = None
__lowercase : List[str] = 1
__lowercase : List[str] = 0
__lowercase : List[Any] = 3
__lowercase : Dict = self.vocab_size - 1
__lowercase : List[Any] = 0.0_1
    def prepare_config_and_inputs( self ) -> List[str]:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Any = None
if self.use_labels:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
    def set_seed( self ) -> List[Any]:
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
__lowercase : List[str] = TFTransfoXLModel(UpperCamelCase_ )
__lowercase ,__lowercase : Optional[int] = model(UpperCamelCase_ ).to_tuple()
__lowercase : List[Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
__lowercase ,__lowercase : Union[str, Any] = model(UpperCamelCase_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
__lowercase : List[Any] = TFTransfoXLLMHeadModel(UpperCamelCase_ )
__lowercase ,__lowercase : Union[str, Any] = model(UpperCamelCase_ ).to_tuple()
__lowercase : List[str] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
__lowercase ,__lowercase : Tuple = model(UpperCamelCase_ ).to_tuple()
__lowercase ,__lowercase : str = model([input_ids_a, mems_a] ).to_tuple()
__lowercase : Any = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
__lowercase ,__lowercase : Any = model(UpperCamelCase_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
__lowercase : List[Any] = TFTransfoXLForSequenceClassification(UpperCamelCase_ )
__lowercase : str = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_a, lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class TFTransfoXLModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase =(
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase =() if is_tf_available() else ()
UpperCamelCase =(
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCamelCase =False
UpperCamelCase =False
UpperCamelCase =False
UpperCamelCase =False
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Optional[int]:
self.model_tester.set_seed()
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
self.model_tester.set_seed()
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def _lowerCamelCase ( self ) -> List[str]:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _lowerCamelCase ( self ) -> Tuple:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def _lowerCamelCase ( self ) -> List[Any]:
pass
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def _lowerCamelCase ( self ) -> List[str]:
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
__lowercase : Optional[Any] = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__lowercase : Union[str, Any] = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(UpperCamelCase_ , max_length=2_00 , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase_ )
| 249 |
"""simple docstring"""
from __future__ import annotations
a_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
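# grid search with f = g + heuristic: expand the open cell with the lowest f until the
# goal is reached, then walk the action grid backwards to recover the path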
def search( grid , init , goal , cost , heuristic , ):
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('''Algorithm is unable to find solution''' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 249 | 1 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase_ ( lowerCAmelCase__ : BertModel , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Tuple = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
lowerCAmelCase_ : Union[str, Any] = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
lowerCAmelCase_ : str = model.state_dict()
def to_tf_var_name(lowerCAmelCase__ : str ):
for patt, repl in iter(lowerCAmelCase__ ):
lowerCAmelCase_ : int = name.replace(lowerCAmelCase__ , lowerCAmelCase__ )
return f"bert/{name}"
def create_tf_var(lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : str , lowerCAmelCase__ : tf.Session ):
lowerCAmelCase_ : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
lowerCAmelCase_ : str = tf.get_variable(dtype=lowerCAmelCase__ , shape=tensor.shape , name=lowerCAmelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCAmelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowerCAmelCase_ : List[str] = to_tf_var_name(lowerCAmelCase__ )
lowerCAmelCase_ : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowerCAmelCase_ : str = torch_tensor.T
lowerCAmelCase_ : Tuple = create_tf_var(tensor=lowerCAmelCase__ , name=lowerCAmelCase__ , session=lowerCAmelCase__ )
tf.keras.backend.set_value(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase_ : int = session.run(lowerCAmelCase__ )
print(f"Successfully created {tf_name}: {np.allclose(lowerCAmelCase__ , lowerCAmelCase__ )}" )
lowerCAmelCase_ : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any]=None ) -> str:
"""simple docstring"""
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='Directory in which to save tensorflow model' )
lowerCAmelCase_ : Optional[Any] = parser.parse_args(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
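# Example invocation (illustrative; adjust the script name and paths to your setup):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint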
if __name__ == "__main__":
main()
| 289 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase__ : Optional[int] = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
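# Everything registered in `_import_structure` above is loaded lazily: at runtime the
# `_LazyModule` below defers the heavy torch-backed imports until an attribute is
# first accessed, while the TYPE_CHECKING branch gives static type checkers the
# real symbols.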
if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 289 | 1 |
"""simple docstring"""
from math import pi
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
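    """
    Return the length of an arc subtending ``angle`` degrees on a circle of
    radius ``radius``: arc = 2 * pi * r * (angle / 360).

    For example, a 90-degree arc of a circle of radius 10 has length
    2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.71.
    """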
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
| 177 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 168 | 0 |
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by the number of positions that match ``main_target``."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
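# For example, crossover("ABCD", "abcd") with random_slice == 2 yields
# ("ABcd", "abCD"): a single-point crossover of the two parents.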
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until ``target`` is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")

    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 300 |
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
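# Example invocation (illustrative; adjust the script name and output path to your setup):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224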
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300 | 1 |
"""Tokenization classes for RemBERT."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 318 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 318 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_snake_case = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_snake_case = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_snake_case = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
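# For example (with numpy arrays): simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))
# returns 2/3, the fraction of positions where prediction and label agree.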
def _A ( __magic_name__ , __magic_name__ , __magic_name__="binary" ):
lowercase__ = simple_accuracy(__magic_name__ , __magic_name__ )
lowercase__ = float(fa_score(y_true=__magic_name__ , y_pred=__magic_name__ , average=__magic_name__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 over all answer pairs plus a per-question macro-F1 and exact match.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 201 |
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
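# After this call, `igf_data_file` (by default igf_context_pairs.jbl) holds the
# collected (context, information-gain) pairs that the secondary learner is
# trained on below.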
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
| 201 | 1 |
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 30 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
a_ = 50000
a_ = 5000
a_ , a_ = os.path.split(__file__)
a_ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : Tuple ) ->Tuple:
'''simple docstring'''
for i in range(snake_case_ ):
__A : int = dataset[i]
@get_duration
def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : Optional[Any] ,snake_case_ : int ) ->Tuple:
'''simple docstring'''
for i in range(0 ,len(snake_case_ ) ,snake_case_ ):
__A : List[str] = dataset[i : i + batch_size]
@get_duration
def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : List[Any] ,snake_case_ : Any ) ->int:
'''simple docstring'''
with dataset.formatted_as(type=snake_case_ ):
for i in range(snake_case_ ):
__A : Union[str, Any] = dataset[i]
@get_duration
def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : Any ,snake_case_ : Union[str, Any] ,snake_case_ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=snake_case_ ):
for i in range(0 ,snake_case_ ,snake_case_ ):
__A : Dict = dataset[i : i + batch_size]
def __lowercase ( ) ->Optional[int]:
'''simple docstring'''
__A : int = {'''num examples''': SPEED_TEST_N_EXAMPLES}
__A : Optional[int] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
__A : int = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
__A : Any = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
__A : List[Any] = generate_example_dataset(
os.path.join(snake_case_ ,'''dataset.arrow''' ) ,snake_case_ ,num_examples=snake_case_ ,seq_shapes={'''list''': (100,)} ,)
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ ,str(snake_case_ ) )
__A : Dict = func(snake_case_ ,**snake_case_ )
print('''shuffling dataset''' )
__A : int = dataset.shuffle()
print('''Second set of iterations (after shuffling''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' ,func.__name__ ,str(snake_case_ ) )
__A : Optional[Any] = func(
snake_case_ ,**snake_case_ )
with open(snake_case_ ,'''wb''' ) as f:
f.write(json.dumps(snake_case_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 179 | 0 |
"""simple docstring"""
def UpperCamelCase ( _A : int = 1000 )-> int:
"""simple docstring"""
A__ = 2**power
A__ = str(__lowerCAmelCase )
A__ = list(__lowerCAmelCase )
A__ = 0
for i in list_num:
sum_of_num += int(__lowerCAmelCase )
return sum_of_num
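# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.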
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
# ---------------------------------------------------------------------------
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
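# Usage sketch of the pipeline outside the test harness (assumes a CUDA GPU and
# network access; parameter values here are illustrative, mirroring the tests):
#
#     pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#     ).to("cuda")
#     # token_indices points at "cat" (2) and "frog" (5) in the tokenized prompt
#     image = pipe("a cat and a frog", token_indices=[2, 5], guidance_scale=7.5).images[0]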
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
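# Usage sketch: with the lazy module in place, importing a symbol from the
# package triggers the real import only on first attribute access, e.g.
#
#     from transformers import UniSpeechConfig   # cheap; no torch import yet
#     config = UniSpeechConfig()                 # now configuration_unispeech loads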
# ---------------------------------------------------------------------------
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int) -> None:
    # Depth-first search with backtracking; prune a branch as soon as the running
    # sum overshoots max_sum or the remaining numbers can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
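# Worked example: branches are pruned as soon as the running total exceeds
# max_sum or the remaining numbers can no longer reach it, leaving exactly
# the subsets [3, 4, 2] and [4, 5] for the inputs above.
assert sorted(sorted(subset) for subset in generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9)) == [[2, 3, 4], [4, 5]]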
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[F'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')

    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1,
    )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(F'nielsr/{model_name}')
        hf_model.push_to_hub(F'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
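# Example invocation (a sketch -- the script file name and output path below
# are illustrative; LAVIS downloads the original weights on first use):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b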
# ---------------------------------------------------------------------------
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
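# Usage sketch: the summary hook above only produces reports when pytest is
# given the flag registered by `pytest_addoption_shared`, e.g.
#
#     python -m pytest tests/ --make-reports=my_test_run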
# ---------------------------------------------------------------------------
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`UniSpeechModel`]."""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
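# Usage sketch: with the default conv strides (5, 2, 2, 2, 2, 2, 2) the feature
# extractor downsamples raw audio by 5 * 2**6 == 320 samples per output frame:
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 320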
# ---------------------------------------------------------------------------
'''simple docstring'''
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute Gamma(num) via the integral definition, for num > 0."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
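    # Quick sanity check against the closed form for positive integers,
    # Gamma(n) == (n - 1)!  -- uses only the standard library:
    from math import factorial, isclose

    for n in range(1, 6):
        assert isclose(gamma(n), factorial(n - 1), rel_tol=1e-6)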
# ---------------------------------------------------------------------------
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
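# A minimal sketch of how GradientAccumulator is meant to be used in a custom
# training loop (`model`, `optimizer`, `batches` and `accumulation_steps` are
# assumed to exist; this is illustrative, not part of the test suite):
#
#     accumulator = GradientAccumulator()
#     for step, batch in enumerate(batches):
#         with tf.GradientTape() as tape:
#             loss = model(batch, training=True)
#         accumulator(tape.gradient(loss, model.trainable_variables))
#         if (step + 1) % accumulation_steps == 0:
#             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#             accumulator.reset()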
# ---------------------------------------------------------------------------
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')

        # check adding a single token
        tokenizer.add_tokens('xxx')
        token_ids = tokenizer('m xxx ɪ', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(['aaa', 'bbb', 'ccc'])
        token_ids = tokenizer('m aaa ɪ ccc', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer('maɪ c', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')

        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        self.assertEqual(phonemes, 'h ə l oʊ h aʊ ɑːɹ j uː')

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')

        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft', word_delimiter_token='|'
        )
        tokenizer.add_tokens('|')

        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        self.assertEqual(phonemes, 'h ə l oʊ | h aʊ | ɑːɹ | j uː |')

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft', word_delimiter_token='|'
        )
        tokenizer.add_tokens('|')

        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft', word_delimiter_token='|'
        )
        tokenizer.add_tokens('|')

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft', word_delimiter_token='|'
        )
        tokenizer.add_tokens('|')

        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft', word_delimiter_token='|'
        )
        tokenizer.add_tokens('|')

        input_text = 'Hello how are you'
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='en-us')
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |')]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft', word_delimiter_token=None
        )
        input_text = 'Hello how are you'

        input_ids_en = tokenizer(input_text, phonemizer_lang='en-us').input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang='fr-fr').input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, 'h ə l oʊ h aʊ ɑːɹ j uː')
        self.assertEqual(text_fr, 'ɛ l o h aʊ a ʁ j u')
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        input_text_up = 'Hello how Are you'
        input_text_low = 'hello how are you'

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        tokenizer.add_tokens(['!', '?'])
        tokenizer.add_special_tokens({'cls_token': '$$$'})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token='|')
        tokenizer.add_tokens('|')

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue('text' in outputs)
        self.assertTrue('char_offsets' in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token='|')

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch['text'], outputs_batch_2['text'])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['char_offsets'], outputs_batch_2['char_offsets'])
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes')
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes')
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency')
    def test_internal_consistency(self):
        pass

    @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing')
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCamelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCamelCase = tokenizer.add_tokens(A_ )
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size + len(A_ ) )
UpperCamelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCamelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCamelCase = tokenizer.add_special_tokens(A_ )
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size_a + len(A_ ) )
UpperCamelCase = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.')
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.')
    def test_torch_encode_plus_sent_to_model(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                tokens = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output['text'], str)
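# Usage sketch outside the test harness (downloads the pretrained vocab; the
# decoded string shown is the expected output for this example prompt):
#
#     tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained(
#         "facebook/wav2vec2-lv-60-espeak-cv-ft"
#     )
#     ids = tokenizer("Hello how are you").input_ids  # text -> phoneme ids
#     tokenizer.decode(ids)                           # -> "h ə l oʊ h aʊ ɑːɹ j uː"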
# ---------------------------------------------------------------------------
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
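# Usage sketch mirroring the hub tests above (assumes network access):
#
#     model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
#     noise = torch.randn(
#         1, model.config.in_channels, model.config.sample_size, model.config.sample_size
#     )
#     with torch.no_grad():
#         sample = model(noise, timestep=10).sample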
# ---------------------------------------------------------------------------
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
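# Usage sketch (hypothetical paths; assumes a diffusers UNet checkpoint on
# disk). The "model.diffusion_model." prefix matches the original SD layout:
#
#     unet_state_dict = load_file("unet/diffusion_pytorch_model.safetensors")
#     sd_unet = convert_unet_state_dict(unet_state_dict)
#     save_file({"model.diffusion_model." + k: v for k, v in sd_unet.items()}, "sd_unet.safetensors")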
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Convert HF Diffusers linear attention weights to SD 1x1-conv weights."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
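# Illustration (not part of the original script): reshape_weight_for_sd turns
# a linear attention weight of shape (512, 512) into a 1x1-conv weight of
# shape (512, 512, 1, 1), which is the layout the SD VAE checkpoint expects.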
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert an HF CLIP text-encoder state dict to the OpenCLIP (SD v2.0) layout."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """SD v1.x text encoders need no key renaming."""
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if they exist; otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}

    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 187 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    """Build a Swinv2Config by parsing the timm model name (e.g. swinv2_tiny_patch4_window8_256)."""
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[4])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[3][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
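# Illustration (not part of the original script), assuming the timm naming scheme:
# "swinv2_tiny_patch4_window8_256" parses to model_size="tiny", window_size=8, img_size=256;
# "swinv2_base_window12to16_192to256_22kft1k" parses to model_size="base", window_size=16, img_size=256.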
def rename_key(name):
    """Translate a timm parameter name to its HF Transformers equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rename timm keys and split fused qkv projections into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 140 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'Salesforce/codegen-350M-mono': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast CodeGen tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None,
               truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
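# Illustrative usage (not part of the original file; the checkpoint and the
# truncation patterns below are assumptions for demonstration):
# tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
# text = tok.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])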
| 201 |
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Brian Kernighan's trick: `number &= number - 1` clears the lowest set
        # bit, so the loop runs once per set bit rather than once per bit.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
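    # Worked example (illustrative): for number = 0b10110 (22) the loop sees
    # 0b10110 -> 0b10100 -> 0b10000 -> 0, so get_set_bits_count(22) == 3.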
| 201 | 1 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag marking translated arrow-key codes

KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 354 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's state dict according to the plugin's state-dict type."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner()
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load an FSDP-wrapped model's state dict according to the plugin's state-dict type."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner()
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save an FSDP optimizer state dict according to the plugin's state-dict type."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load an FSDP optimizer state dict according to the plugin's state-dict type."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
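# Illustrative call pattern (assumed names, not part of the original module):
# save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt_dir")
# save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt_dir")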
| 72 | 0 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
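    # Worked example (illustrative): climb_stairs(4) == 5; the five orderings
    # of 1- and 2-steps are 1+1+1+1, 1+1+2, 1+2+1, 2+1+1 and 2+2.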
| 108 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    r"""Constructs a GLPN image processor."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size_divisor: int = 32,
                 resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size_divisor=None, resample=None, do_rescale=None,
                   return_tensors=None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
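# Illustration (not part of the original file): with size_divisor=32, a
# 600x480 image is resized to 576x480, since 600 // 32 * 32 == 576 and
# 480 // 32 * 32 == 480.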
| 219 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 290 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
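# Worked example (illustrative): catalan_numbers(5) == [1, 1, 2, 5, 14, 42];
# e.g. C(4) = C0*C3 + C1*C2 + C2*C1 + C3*C0 = 5 + 2 + 2 + 5 = 14.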
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 290 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
a__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
a__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
a__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
'''simple docstring'''
    def _info(self):
        if self.config_name not in [
            "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb",
            "mrpc", "qqp", "qnli", "rte", "wnli", "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 317 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Convert an LDM VAE state dict to the HF Diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
a : Optional[int] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 114 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create a vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether a vector is the zero vector, up to `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
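# Worked example (illustrative): (0, 0, 0), (1, 1, 1) and (3, 3, 3) lie on one
# line, so AB x AC == (0, 0, 0) and are_collinear returns True.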
| 371 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ = model.roberta.encoder.layer[i]
lowerCamelCase_ = roberta_sent_encoder.layers[i]
lowerCamelCase_ = layer.attention
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.weight
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCamelCase_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCamelCase_ = roberta_layer.self_attn.q_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.q_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.k_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.k_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.v_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCamelCase_ = roberta_layer.self_attn.out_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCamelCase_ = roberta_layer.final_layer_norm.weight
lowerCamelCase_ = roberta_layer.final_layer_norm.bias
# intermediate
lowerCamelCase_ = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCamelCase_ = roberta_layer.fca.weight
lowerCamelCase_ = roberta_layer.fca.bias
# output
lowerCamelCase_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        lowerCamelCase_ = roberta_layer.fc2.weight
        lowerCamelCase_ = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.weight
lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.bias
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    our_output = model(input_ids )[0]
if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids ) )
else:
        their_output = roberta.model(input_ids )[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
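    # Editor's note -- an illustrative invocation (the checkpoint paths below are
    # hypothetical, not shipped with the repository):
    #   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
    #       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --classification_head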
| 208 | 0 |
def sylvester( number : int ) -> int:
    """simple docstring"""
    assert isinstance(number , int ), F"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = F"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 313 |
'''simple docstring'''
from PIL import Image
def change_contrast( img : Image , level : int ) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast( c : int ) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 1_70)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
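        # Editor's note (worked number): at level=170 the factor above is
        # (259 * (170 + 255)) / (255 * (259 - 170)) ~= 4.85, so midtones at
        # c == 128 are left unchanged while values away from 128 are pushed
        # toward 0 or 255.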
| 79 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'roformer'
    def __init__( self , vocab_size=50_000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
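# Editor's note: the table-plus-_LazyModule pattern above gives the package lazy
# imports -- heavy submodules are only materialized on first attribute access,
# and missing optional backends (vision / torch / tf) simply drop their symbols
# from `_import_structure` instead of failing at import time.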
| 11 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self : Optional[int], **UpperCAmelCase__ : List[Any] ):
        config = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase__ )
return config
    def test_timesteps( self : Tuple ):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self : Any ):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end )
    def test_schedules( self : Tuple ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self : int ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self : str ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self : Tuple ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type( self : Tuple ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self : Dict ):
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
    def test_variance( self : str ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
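        # Editor's note: these targets follow the DDPM posterior variance
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
        # evaluated under the linear beta schedule configured above.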
    def test_full_loop_no_noise( self : Union[str, Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = len(UpperCAmelCase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
__lowercase = model(UpperCAmelCase__, UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
    def test_full_loop_with_v_prediction( self : List[str] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = len(UpperCAmelCase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
__lowercase = model(UpperCAmelCase__, UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(UpperCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
    def test_custom_timesteps( self : Optional[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
__lowercase = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase__ ):
if i == len(UpperCAmelCase__ ) - 1:
__lowercase = -1
else:
__lowercase = timesteps[i + 1]
__lowercase = scheduler.previous_timestep(UpperCAmelCase__ )
__lowercase = prev_t.item()
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
    def test_custom_timesteps_increasing_order( self : Union[str, Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCAmelCase__, msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self : List[Any] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [1_0_0, 8_7, 5_0, 1, 0]
__lowercase = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__, timesteps=UpperCAmelCase__ )
    def test_custom_timesteps_too_large( self : Optional[int] ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**UpperCAmelCase__ )
__lowercase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase__, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
| 17 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    def __init__( self,parent,batch_size=3,image_size=32,num_channels=3,embeddings_size=10,hidden_sizes=[10, 20, 30, 40],depths=[1, 1, 2, 1],is_training=True,use_labels=True,hidden_act="relu",num_labels=3,scope=None,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
return RegNetConfig(
num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,image_size=self.image_size,)
    def create_and_check_model( self,config,pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),)
    def create_and_check_for_image_classification( self,config,pixel_values ):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self,config_class=RegNetConfig,has_text_modality=False )
def UpperCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
return
def UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def UpperCamelCase ( self ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1],expected_arg_names )
def UpperCamelCase ( self ):
        def check_hidden_states_output(inputs_dict,config,model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ),expected_num_stages + 1 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict,config,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict,config,model_class )
def UpperCamelCase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict,model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values,**kwargs ):
                    return model(pixel_values=pixel_values,**kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ),len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs,outputs ):
                    self.assertEqual(jitted_output.shape,output.shape )
def prepare_img( )->Optional[int]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image,return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape,expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3],expected_slice,atol=1E-4 ) )
| 193 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
'''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_UpperCAmelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCAmelCase : Optional[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : str = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCAmelCase : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_UpperCAmelCase : Dict = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCAmelCase : Union[str, Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Optional[int] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_UpperCAmelCase : Optional[int] = BatchEncoding()
_UpperCAmelCase : str = input_ids
_UpperCAmelCase : Optional[Any] = attention_mask
if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
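# Editor's sketch of typical usage (the checkpoint name is the public OwlViT
# release, shown for illustration):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="pt")
# Nested text lists are padded with " " to the same number of queries per image,
# as implemented in `__call__` above.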
| 189 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCAmelCase : List[Any] = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_UpperCAmelCase : Union[str, Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_UpperCAmelCase : Optional[int] = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    def _info( self : str ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
    def _compute( self : Any , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
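# Editor's worked sketch of the GLEU idea described in _DESCRIPTION: for the
# hypothesis "the cat sat" against the single reference "the cat sat", every
# 1- to 4-gram matches, so n-gram precision and recall are both 1.0 and
# GLEU = min(precision, recall) = 1.0. Partial overlap lowers whichever of the
# two ratios is smaller, and that minimum is the reported score.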
| 50 |
def solution( limit : int = 100_0000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also a < 4 * d
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
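# Editor's note on the algebra behind the loop: writing the progression as
# x = a + d, y = a, z = a - d gives x**2 - y**2 - z**2 = a * (4 * d - a), so
# every solution factors n = a * m with m = 4 * d - a. The loop enumerates
# a ("first_term") and n, recovers d = (a + n / a) / 4, and counts n only when
# d is a positive integer with a > d (so z > 0) and a < 4 * d (positive result).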
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ) -> Dict:
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        image_processor_map = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ) -> Tuple:
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ) -> List[str]:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ) -> str:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) -> Tuple:
        '''simple docstring'''
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
__UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : List[str] = self.prepare_image_inputs()
__UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" )
__UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : Dict = """test"""
__UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : List[Any] = """test"""
__UpperCAmelCase : int = self.prepare_image_inputs()
__UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase )
__UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
__UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : str = None
__UpperCAmelCase : Dict = self.prepare_image_inputs()
__UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 )
__UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 )
__UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 )
__UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 16 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum( nums : Sequence[int] | None = None ) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
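# Editor's sanity sketch: on the classic Kadane example
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best contiguous run is [4, -1, 2, 1],
# so max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6.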
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 16 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='max_length' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='max_length' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
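# Editor's note (illustrative): the CSV files are expected to share a header
# row; the column selected by `label_column_id` supplies the labels and every
# remaining column is treated as text, e.g.
#   sentence,label
#   "the movie was great",positive
# with one or two text columns supported by the two tokenization branches above.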
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file: str = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F" {key} = {value}" )
                writer.write(F"{key} = {value}\n" )
            results.update(result )
return results
if __name__ == "__main__":
main()
| 29 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
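# Editor's note: the three structures above are, in order, the system's total
# claim vector, the per-process allocation table, and the per-process maximum
# claim table consumed by the banker's-algorithm safety check below.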
class UpperCAmelCase :
    def __init__( self :Optional[int] , claim_vector :list[int] , allocated_resources_table :list[list[int]] , maximum_claim_table :list[list[int]] , )-> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self :Union[str, Any] )-> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources( self :Tuple )-> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need( self :Dict )-> list[list[int]]:
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
    def __need_index_manager( self :Any )-> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self :Tuple , **kwargs :List[str] )-> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break
    def __pretty_data( self :Optional[Any] )-> Tuple:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
F"P{self.__allocated_resources_table.index(lowercase_ ) + 1}"
+ " ".join(F"{it:>8}" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
F"P{self.__maximum_claim_table.index(lowercase_ ) + 1}"
+ " ".join(F"{it:>8}" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(lowercase_ ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(lowercase_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
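# A minimal driver sketch (an assumption about the intended usage, not original code):
# run the Banker's safety check on the module-level test tables with pretty-printing
# enabled.
#
#     UpperCAmelCase(
#         test_claim_vector , test_allocated_res_table , test_maximum_claim_table
#     ).main(describe=True )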
| 237 | 0 |
import argparse
lowercase_ = """docs/source/_static/js/custom.js"""
def update_custom_js(version ):
    with open(lowercase_ , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = F"""const stableVersion = \"v{version}\"\n"""
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F"""    \"v{version}\": \"v{version}\",\n"""
    with open(lowercase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
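# For reference (an assumption about the file being edited, consistent with the prefixes
# the loops above search for), docs/source/_static/js/custom.js is expected to contain:
#
#     const stableVersion = "v4.30.0"
#     const versionMapping = {
#         "": "doc",
#         "v4.30.0": "v4.30.0",
#     }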
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
lowercase_ = parser.parse_args()
update_custom_js(args.version)
| 356 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
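# Background note (not original code): _LazyModule defers the real submodule imports
# until an attribute is first accessed, so importing this package stays cheap; the
# TYPE_CHECKING branch above re-imports everything eagerly only so static type checkers
# and IDEs can resolve the names.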
| 269 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 127 |
class A__ :
"""simple docstring"""
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value ( self ):
        return self.value
    def get_name ( self ):
        return self.name
    def get_weight ( self ):
        return self.weight
    def get_ratio ( self ):
        return self.value / self.weight
def build_menu (name ,value ,weight ):
    """simple docstring"""
    menu = []
    for i in range(len(name ) ):
        menu.append(Things(name[i] ,value[i] ,weight[i] ) )
    return menu
def greedy (items ,max_cost ,key_func ):
    """simple docstring"""
    items_copy = sorted(items ,key=key_func ,reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def UpperCAmelCase__ ():
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
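# A minimal usage sketch (not original code; the choice of key_func is an assumption,
# any of the Things getters above works, e.g. get_ratio for greedy-by-density):
#
#     foods = build_menu(["Burger", "Pizza"] , [80, 100] , [40, 10] )
#     chosen, total_value = greedy(foods , max_cost=60 , key_func=Things.get_value )
#     # takes the highest-value items first while the summed weight stays within max_cost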
| 127 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT : Tuple = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN : Tuple = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info( auth_token : str ) ->dict[Any, Any]:
    '''simple docstring'''
    headers = {
        "Authorization": F"""token {auth_token}""",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 354 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = '''T5Config'''
def _SCREAMING_SNAKE_CASE ( input_ids : jnp.array , pad_token_id : int , decoder_start_token_id : int ) ->jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
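# Worked example of the shift-right helper above (illustrative, not original code):
# with pad_token_id=0 and decoder_start_token_id=0,
#     input_ids = [[5, -100, 6]]  ->  shift right  ->  [[0, 5, -100]]  ->  pad fill  ->  [[0, 5, 0]]
# every token moves one slot right, the decoder start token fills slot 0, and any
# remaining -100 (ignored-label) entries are replaced by the pad token.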
class FlaxMTaModel( FlaxTaModel ):
    model_type = """mt5"""
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    model_type = """mt5"""
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    model_type = """mt5"""
    config_class = MTaConfig
| 79 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
    model_type = 'blip_2_vision_model'
def __init__( self : List[Any] ,_UpperCAmelCase : List[Any]=1408 ,_UpperCAmelCase : Optional[int]=6144 ,_UpperCAmelCase : List[str]=39 ,_UpperCAmelCase : Dict=16 ,_UpperCAmelCase : Optional[Any]=224 ,_UpperCAmelCase : Any=14 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[int]=0.0_00_01 ,_UpperCAmelCase : Tuple=0.0 ,_UpperCAmelCase : Union[str, Any]=1E-10 ,_UpperCAmelCase : Any=True ,**_UpperCAmelCase : Dict ,):
super().__init__(**_UpperCAmelCase )
_a : int = hidden_size
_a : List[str] = intermediate_size
_a : List[Any] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Tuple = patch_size
_a : List[str] = image_size
_a : int = initializer_range
_a : Union[str, Any] = attention_dropout
_a : Union[str, Any] = layer_norm_eps
_a : List[str] = hidden_act
_a : Any = qkv_bias
@classmethod
def __lowercase ( cls : Tuple ,_UpperCAmelCase : Union[str, os.PathLike] ,**_UpperCAmelCase : Any ):
cls._set_token_in_kwargs(_UpperCAmelCase )
        config_dict , kwargs = cls.get_config_dict(_UpperCAmelCase ,**_UpperCAmelCase )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict ,**kwargs )
class __magic_name__ ( _UpperCamelCase ):
    model_type = 'blip_2_qformer'
def __init__( self : int ,_UpperCAmelCase : List[str]=30522 ,_UpperCAmelCase : Dict=768 ,_UpperCAmelCase : Tuple=12 ,_UpperCAmelCase : Any=12 ,_UpperCAmelCase : int=3072 ,_UpperCAmelCase : str="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : str=0.1 ,_UpperCAmelCase : int=512 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1E-12 ,_UpperCAmelCase : int=0 ,_UpperCAmelCase : Optional[int]="absolute" ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Any=1408 ,**_UpperCAmelCase : Optional[int] ,):
super().__init__(pad_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Any = vocab_size
_a : Optional[int] = hidden_size
_a : Dict = num_hidden_layers
_a : Dict = num_attention_heads
_a : str = hidden_act
_a : Any = intermediate_size
_a : Dict = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Union[str, Any] = max_position_embeddings
_a : Tuple = initializer_range
_a : Optional[int] = layer_norm_eps
_a : int = position_embedding_type
_a : Optional[Any] = cross_attention_frequency
_a : List[str] = encoder_hidden_size
@classmethod
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, os.PathLike] ,**_UpperCAmelCase : Union[str, Any] ):
cls._set_token_in_kwargs(_UpperCAmelCase )
        config_dict , kwargs = cls.get_config_dict(_UpperCAmelCase ,**_UpperCAmelCase )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict ,**kwargs )
class __magic_name__ ( _UpperCamelCase ):
    model_type = 'blip-2'
    is_composition = True
def __init__( self : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : int=32 ,**_UpperCAmelCase : str ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
_a : str = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_a : List[str] = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_a : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_a : Optional[Any] = BlipaVisionConfig(**_UpperCAmelCase )
_a : Tuple = BlipaQFormerConfig(**_UpperCAmelCase )
_a : Optional[int] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_a : Any = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
_a : Dict = self.text_config.tie_word_embeddings
_a : Union[str, Any] = self.text_config.is_encoder_decoder
_a : int = num_query_tokens
_a : str = self.vision_config.hidden_size
_a : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a : Optional[int] = 1.0
_a : str = 0.02
@classmethod
def __lowercase ( cls : str ,_UpperCAmelCase : BlipaVisionConfig ,_UpperCAmelCase : BlipaQFormerConfig ,_UpperCAmelCase : PretrainedConfig ,**_UpperCAmelCase : List[Any] ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_UpperCAmelCase ,)
def __lowercase ( self : int ):
_a : Dict = copy.deepcopy(self.__dict__ )
_a : Any = self.vision_config.to_dict()
_a : Optional[int] = self.qformer_config.to_dict()
_a : Dict = self.text_config.to_dict()
_a : Optional[Any] = self.__class__.model_type
return output
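# Usage sketch against the real transformers API these classes mirror (their model_type
# strings identify them as Blip2VisionConfig, Blip2QFormerConfig and Blip2Config; the
# classmethod name matches the one defined above):
#
#     config = Blip2Config.from_vision_qformer_text_configs(
#         Blip2VisionConfig() , Blip2QFormerConfig() , OPTConfig()
#     )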
| 89 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times using --repeat-times 3, and the averaged results will be reported.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
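# A small standalone sketch of the variation expansion described above (illustrative,
# not part of the original tool; it mirrors the itertools.product call in main() below):
def _expand_variations_sketch(dims):
    # e.g. dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]] -> 6 combined strings
    import itertools
    return [" ".join(parts).strip() for parts in itertools.product(*dims)]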
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class Tee :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
        self.stdout = sys.stdout
        self.file = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
    def write ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
def get_original_command (max_width=80 , full_python_path=False ):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f"{key}={val}" )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/" )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd ) > 0:
        current_line += f"{cmd.pop(0 )} "
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ""
    return "\\\n".join(lines )
def get_base_command (args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+" , " " , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+" , "" , args.base_cmd )
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+" , "" , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single (id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print("STDOUT" , result.stdout )
        print("STDERR" , result.stderr )
    # save the streams
    prefix = variation.replace(" " , "-" )
    with open(Path(output_dir ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print("failed" )
        return {target_metric_key: nan}
    with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run (id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f"{outcome} {mean_target}"
        if len(results ) > 1:
            results_str += f" {tuple(round(x , 2 ) for x in results )}"
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions ():
    properties = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results (results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = "variation"
    diff_key = "diff_%"
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis="columns" , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis="columns" )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis="columns" )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    df_console = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt=".2f" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt=".2f" )]
    print("\n\n".join(report ) )
def main ():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd" , default=None , type=str , required=True , help="Base cmd" , )
    parser.add_argument(
        "--variations" , default=None , type=str , nargs="+" , required=True , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
    parser.add_argument(
        "--base-variation" , default=None , type=str , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
    parser.add_argument(
        "--target-metric-key" , default=None , type=str , required=True , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
    parser.add_argument(
        "--report-metric-keys" , default="" , type=str , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'" , )
    parser.add_argument(
        "--repeat-times" , default=1 , type=int , help="How many times to re-run each variation - an average will be reported" , )
    parser.add_argument(
        "--output_dir" , default="output_benchmark" , type=str , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
    parser.add_argument(
        "--verbose" , default=False , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(" ".join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
    print(f"and this script's output is also piped into {report_fn}" )
    sys.stdout = Tee(report_fn )
    print(f"\n*** Running {len(variations )} benchmarks:" )
    print(f"Base command: {' '.join(base_cmd )}" )
    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations , desc="Total completion: " , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    key: KEY
    val: VAL
class _DeletedItem( _Item ):
    def __init__( self ) -> None:
        """simple docstring"""
        super().__init__(None , None )
    def __bool__( self ) -> bool:
        """simple docstring"""
        return False
_deleted = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ) -> None:
        """simple docstring"""
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key ) -> int:
        """simple docstring"""
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind ) -> int:
        """simple docstring"""
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind , key , val ) -> bool:
        """simple docstring"""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ) -> bool:
        """simple docstring"""
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ) -> bool:
        """simple docstring"""
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size ) -> None:
        """simple docstring"""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ) -> None:
        """simple docstring"""
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ) -> None:
        """simple docstring"""
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key ) -> Iterator[int]:
        """simple docstring"""
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key , val ) -> None:
        """simple docstring"""
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key , val ) -> None:
        """simple docstring"""
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key ) -> None:
        """simple docstring"""
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key ) -> VAL:
        """simple docstring"""
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ) -> int:
        """simple docstring"""
        return self._len
    def __iter__( self ) -> Iterator[KEY]:
        """simple docstring"""
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ) -> str:
        """simple docstring"""
        val_string = " ,".join(
            F"{item.key}: {item.val}" for item in self._buckets if item )
        return F"HashMap({val_string})"
| 183 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.speecht5')
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
hf_model.apply_weight_norm()
UpperCamelCase = checkpoint["""input_conv.weight_g"""]
UpperCamelCase = checkpoint["""input_conv.weight_v"""]
UpperCamelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_g"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_v"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
UpperCamelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
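# Note on the weight_g / weight_v pairs copied above (background, not original code):
# PyTorch weight normalization reparameterizes each weight as
#     w = g * v / ||v||
# so a weight-normed checkpoint stores the magnitude g and direction v separately.
# apply_weight_norm() recreates those parameters on the HF model so they can be
# assigned, and remove_weight_norm() folds them back into plain weights afterwards.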
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , )-> List[Any]:
if config_path is not None:
UpperCamelCase = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
UpperCamelCase = SpeechTaHifiGanConfig()
UpperCamelCase = SpeechTaHifiGan(__UpperCamelCase )
UpperCamelCase = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase = np.load(__UpperCamelCase )
UpperCamelCase = stats[0].reshape(-1 )
UpperCamelCase = stats[1].reshape(-1 )
UpperCamelCase = torch.from_numpy(__UpperCamelCase ).float()
UpperCamelCase = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 183 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
        '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool (string ):
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 235 |
'''simple docstring'''
from math import factorial, radians
def __SCREAMING_SNAKE_CASE ( angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ):
    # Reduce the angle into [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
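# A quick self-check sketch (not part of the original module): the truncated Maclaurin
# series sin(x) = x - x**3/3! + x**5/5! - ... implemented above should agree closely
# with math.sin.
def _maclaurin_sin_check(angle_in_degrees = 30.0 ):
    from math import sin
    return abs(__SCREAMING_SNAKE_CASE(angle_in_degrees ) - sin(radians(angle_in_degrees ) ))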
if __name__ == "__main__":
    __import__('doctest').testmod()
| 341 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class a ( _A ):
'''simple docstring'''
    model_type = 'data2vec-text'
def __init__( self : Optional[Any] , __snake_case : Optional[int]=3_05_22 , __snake_case : List[str]=7_68 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Union[str, Any]=30_72 , __snake_case : List[Any]="gelu" , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Tuple=5_12 , __snake_case : str=2 , __snake_case : str=0.02 , __snake_case : List[Any]=1E-12 , __snake_case : Any=1 , __snake_case : List[Any]=0 , __snake_case : Dict=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Any=None , **__snake_case : List[Any] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class a ( _A ):
'''simple docstring'''
@property
def lowerCamelCase_ ( self : str ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 177 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : int = (3, 32, 128)
lowercase__ : List[str] = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ : Tuple = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_snake_case ) + '''\n''' )
lowercase__ : Optional[Any] = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ : Optional[int] = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,'''w''' ,encoding='''utf-8''' ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Any ,**_snake_case : Dict ) -> List[str]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCAmelCase ( self : Any ,**_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )
lowercase__ : Any = Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) )
return image_input
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : int = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Dict = self.get_image_processor()
lowercase__ : List[str] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
lowercase__ : List[str] = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
lowercase__ : Dict = MgpstrProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : List[Any] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
lowercase__ : Tuple = self.prepare_image_inputs()
lowercase__ : Any = image_processor(_snake_case ,return_tensors='''np''' )
lowercase__ : str = processor(images=_snake_case ,return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : str = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
lowercase__ : Tuple = '''test'''
lowercase__ : str = processor(text=_snake_case )
lowercase__ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
lowercase__ : Any = self.get_image_processor()
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : Union[str, Any] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
lowercase__ : Union[str, Any] = '''test'''
lowercase__ : List[Any] = self.prepare_image_inputs()
lowercase__ : Tuple = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
lowercase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[Any] = processor.char_decode(_snake_case )
lowercase__ : Dict = tokenizer.batch_decode(_snake_case )
lowercase__ : Optional[int] = [seq.replace(''' ''' ,'''''' ) for seq in decoded_tok]
self.assertListEqual(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : int = self.get_image_processor()
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : int = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
lowercase__ : List[Any] = None
lowercase__ : List[Any] = self.prepare_image_inputs()
lowercase__ : Tuple = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Any = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
lowercase__ : Tuple = torch.randn(1 ,27 ,38 )
lowercase__ : int = torch.randn(1 ,27 ,50_257 )
lowercase__ : List[Any] = torch.randn(1 ,27 ,30_522 )
lowercase__ : Any = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) ,['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 16 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class __A ( A_ ):
'''simple docstring'''
pass
def gen( shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main( ):
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS )]}
    full_ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        full_ds = Dataset.from_list(list(full_ds ) )
    ds = split_dataset_by_node(full_ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
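# Worked example of the expected-size arithmetic above (illustrative): with
# NUM_SHARDS * NUM_ITEMS_PER_SHARD = 4 * 3 = 12 items and world_size = 5, each rank
# gets 12 // 5 = 2 items and the 12 % 5 = 2 leftovers go to ranks 0 and 1, so the
# expected local sizes are [3, 3, 2, 2, 2].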
| 16 | 1 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration (func ):
    """simple docstring"""
    def wrapper(*args, **kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples (features , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset (dataset_path , features , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples , num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
| 153 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert (base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ):
    """simple docstring"""
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''', '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''', '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
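# The update applied above, written out (background, not original code): for each
# adapted layer with frozen weight W0 and low-rank factors A = lora_up (d_out x r)
# and B = lora_down (r x d_in),
#
#     W = W0 + alpha * (A @ B)
#
# which is the `curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )`
# step, with the 1x1-conv case squeezing and re-adding the trailing spatial dims.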
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
__snake_case = parser.parse_args()
__snake_case = args.base_model_path
__snake_case = args.checkpoint_path
__snake_case = args.dump_path
__snake_case = args.lora_prefix_unet
__snake_case = args.lora_prefix_text_encoder
__snake_case = args.alpha
__snake_case = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__snake_case = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 153 | 1 |
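A minimal invocation sketch for the conversion script above. The script filename, model id, and paths are hypothetical placeholders, not taken from the source.

# python convert_lora_safetensor_to_diffusers.py \
#     --base_model_path runwayml/stable-diffusion-v1-5 \
#     --checkpoint_path ./lora_weights.safetensors \
#     --dump_path ./converted_pipeline \
#     --alpha 0.75 \
#     --device cpu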
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : str ,*_snake_case : List[Any] ,**_snake_case : Dict ) -> int:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["sentencepiece"]
def __init__( self : str ,*_snake_case : Any ,**_snake_case : str ) -> Tuple:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["sentencepiece"]
def __init__( self : Any ,*_snake_case : Any ,**_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[Any] ,*_snake_case : Union[str, Any] ,**_snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*_snake_case : List[str] ,**_snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : int ,*_snake_case : Optional[int] ,**_snake_case : Any ) -> Dict:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[int] ,*_snake_case : Tuple ,**_snake_case : Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["sentencepiece"]
def __init__( self : List[str] ,*_snake_case : Optional[int] ,**_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : List[str] ,*_snake_case : str ,**_snake_case : int ) -> List[Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["sentencepiece"]
def __init__( self : int ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> int:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*_snake_case : Optional[int] ,**_snake_case : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["sentencepiece"]
def __init__( self : Dict ,*_snake_case : Optional[Any] ,**_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*_snake_case : Dict ,**_snake_case : Dict ) -> Any:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[str] ,*_snake_case : List[str] ,**_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : Optional[Any] ,*_snake_case : List[Any] ,**_snake_case : Any ) -> Dict:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : int = ["sentencepiece"]
def __init__( self : List[Any] ,*_snake_case : Tuple ,**_snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["sentencepiece"]
def __init__( self : str ,*_snake_case : List[Any] ,**_snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["sentencepiece"]
def __init__( self : Dict ,*_snake_case : Dict ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*_snake_case : str ,**_snake_case : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ["sentencepiece"]
def __init__( self : Dict ,*_snake_case : Tuple ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Any = ["sentencepiece"]
def __init__( self : Dict ,*_snake_case : List[str] ,**_snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*_snake_case : int ,**_snake_case : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : int = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*_snake_case : Any ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Any = ["sentencepiece"]
def __init__( self : Dict ,*_snake_case : Dict ,**_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["sentencepiece"]
def __init__( self : Any ,*_snake_case : str ,**_snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["sentencepiece"]
def __init__( self : Optional[Any] ,*_snake_case : Optional[int] ,**_snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["sentencepiece"]
def __init__( self : Optional[Any] ,*_snake_case : Tuple ,**_snake_case : List[str] ) -> str:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : Any ,*_snake_case : Optional[Any] ,**_snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*_snake_case : Optional[int] ,**_snake_case : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = ["sentencepiece"]
def __init__( self : str ,*_snake_case : Union[str, Any] ,**_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ["sentencepiece"]
def __init__( self : List[Any] ,*_snake_case : Tuple ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self ,['''sentencepiece'''] )
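The classes above all follow the same dummy-object pattern: placeholders that raise a helpful error at use time when the sentencepiece backend is missing. Below is a simplified, standalone sketch of the mechanism (an assumption-laden illustration, not the exact transformers implementation, which also checks backend availability; if pasted into this module it would shadow the imported names).

class DummyObject(type):
    # Metaclass of dummy objects: any non-private attribute access on the class
    # triggers the backend check instead of returning a real attribute.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    # Simplified: always raise, naming the object and the missing backends.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the {backends} backend(s), which are not available.")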
| 16 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'torchsde']
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"]) | 269 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
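A quick sanity check of the indexing convention used above: F(12) = 144 is the first Fibonacci number with three digits, so solution(3) should return 12.

assert fibonacci(12) == 144
assert solution(3) == 12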
| 364 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
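Why b ** (p - 2) acts as a modular inverse here: by Fermat's little theorem, b ** (p - 1) is congruent to 1 (mod p) for prime p with gcd(b, p) == 1, so b ** (p - 2) is congruent to the inverse of b modulo p. A tiny concrete check:

# p = 7, b = 3: 3 ** 5 = 243 = 34 * 7 + 5, and 3 * 5 = 15 is congruent to 1 (mod 7).
assert (3 * binary_exponentiation(3, 7 - 2, 7)) % 7 == 1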
| 145 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
'''simple docstring'''
def __init__( self , A , A=13 , A=32 , A=2 , A=3 , A=16 , A=[1, 2, 1] , A=[2, 2, 4] , A=2 , A=2.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=False , A=True , A=0.02 , A=1e-5 , A=True , A=None , A=True , A=10 , A=8 , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embed_dim
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = num_heads
_SCREAMING_SNAKE_CASE = window_size
_SCREAMING_SNAKE_CASE = mlp_ratio
_SCREAMING_SNAKE_CASE = qkv_bias
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = drop_path_rate
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = use_absolute_embeddings
_SCREAMING_SNAKE_CASE = patch_norm
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = encoder_stride
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_( self ) -> str:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case_( self , A , A , A ) -> Dict:
_SCREAMING_SNAKE_CASE = SwinvaModel(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A )
_SCREAMING_SNAKE_CASE = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_SCREAMING_SNAKE_CASE = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case_( self , A , A , A ) -> Tuple:
_SCREAMING_SNAKE_CASE = SwinvaForMaskedImageModeling(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = SwinvaForMaskedImageModeling(A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_( self , A , A , A ) -> Dict:
_SCREAMING_SNAKE_CASE = self.type_sequence_label_size
_SCREAMING_SNAKE_CASE = SwinvaForImageClassification(A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = SwinvaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A , embed_dim=37 )
def snake_case_( self ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def snake_case_( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def snake_case_( self ) -> int:
pass
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A , A ) )
_SCREAMING_SNAKE_CASE = outputs.attentions
_SCREAMING_SNAKE_CASE = len(self.model_tester.depths )
self.assertEqual(len(A ) , A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = config.window_size**2
_SCREAMING_SNAKE_CASE = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A , A ) )
_SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(A ) , A )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_SCREAMING_SNAKE_CASE = len(A )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A , A ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
_SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_SCREAMING_SNAKE_CASE = 2
self.assertEqual(out_len + added_hidden_states , len(A ) )
_SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(A ) , A )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def snake_case_( self , A , A , A , A ) -> Dict:
_SCREAMING_SNAKE_CASE = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A , A ) )
_SCREAMING_SNAKE_CASE = outputs.hidden_states
_SCREAMING_SNAKE_CASE = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A ) , A )
# Swinv2 has a different seq_length
_SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_SCREAMING_SNAKE_CASE = outputs.reshaped_hidden_states
self.assertEqual(len(A ) , A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = reshaped_hidden_states[0].shape
_SCREAMING_SNAKE_CASE = (
reshaped_hidden_states[0].view(A , A , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(A , A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(A , A , A , A )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_SCREAMING_SNAKE_CASE = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_SCREAMING_SNAKE_CASE = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def snake_case_( self ) -> List[str]:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = SwinvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = _config_zero_init(A )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=A )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_( self ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
A )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_SCREAMING_SNAKE_CASE = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**A )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
_SCREAMING_SNAKE_CASE = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 58 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[str] = "canine"
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any]=768 , snake_case__ : Tuple=12 , snake_case__ : Optional[Any]=12 , snake_case__ : Union[str, Any]=3072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=1_6384 , snake_case__ : str=16 , snake_case__ : Tuple=0.02 , snake_case__ : Dict=1E-12 , snake_case__ : Any=0 , snake_case__ : Optional[int]=0xe_000 , snake_case__ : List[str]=0xe_001 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : List[Any]=8 , snake_case__ : List[str]=1_6384 , snake_case__ : Union[str, Any]=128 , **snake_case__ : Tuple , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCamelCase_ : Tuple =max_position_embeddings
lowerCamelCase_ : Optional[int] =hidden_size
lowerCamelCase_ : Tuple =num_hidden_layers
lowerCamelCase_ : Dict =num_attention_heads
lowerCamelCase_ : str =intermediate_size
lowerCamelCase_ : Dict =hidden_act
lowerCamelCase_ : List[Any] =hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase_ : Dict =initializer_range
lowerCamelCase_ : Tuple =type_vocab_size
lowerCamelCase_ : Optional[Any] =layer_norm_eps
# Character config:
lowerCamelCase_ : List[str] =downsampling_rate
lowerCamelCase_ : List[Any] =upsampling_kernel_size
lowerCamelCase_ : Any =num_hash_functions
lowerCamelCase_ : Optional[int] =num_hash_buckets
lowerCamelCase_ : Union[str, Any] =local_transformer_stride
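A minimal usage sketch for the configuration above, assuming a transformers install that ships the CANINE model classes:

from transformers import CanineConfig, CanineModel

config = CanineConfig()       # the defaults shown above
model = CanineModel(config)   # randomly initialised model with this config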
| 144 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
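A short usage sketch (hedged, for illustration): a basic-block layout with depths [2, 2, 2, 2] corresponds to a ResNet-18-style model.

from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2])
model = ResNetModel(config)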
| 120 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
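For orientation (a summary added here, not text from the file): step_pred implements one Euler-Maruyama step of the reverse-time variance-preserving SDE of Song et al. (2021),

# dx = [f(x, t) - g(t)^2 * score(x, t)] dt + g(t) dw
# with drift f(x, t) = -0.5 * beta(t) * x and diffusion g(t) = sqrt(beta(t)),
# where beta(t) interpolates linearly between beta_min and beta_max.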
| 120 | 1 |
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
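A usage sketch for the class above; the subclass and hyperparameter names are illustrative, not from the source.

class MyNamer(TrialShortNamer):
    PREFIX = "hp"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}

# Non-default values are encoded with auto-generated short keys ("bs" for batch_size),
# while values equal to the defaults are omitted from the name:
MyNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  # -> "hp_bs64"
MyNamer.parse_repr("hp_bs64")  # -> {"batch_size": 64.0, "learning_rate": 0.001}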
| 38 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfArgumentParserTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 24 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
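A hedged invocation sketch for the script above; the model id and paths are placeholders, not taken from the source.

# python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --data_dir ./conll2003 \
#     --labels ./conll2003/labels.txt \
#     --output_dir ./ner-model \
#     --max_seq_length 128 \
#     --do_train --do_eval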
| 366 |
def rank_of_matrix(matrix: list) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
                # Reduce the row pointer by one to stay on the same row
                row -= 1

    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
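A worked example (added for illustration, not from the source file): in [[1.0, 2.0], [2.0, 4.0]] the second row is twice the first, so elimination zeroes it out and one pivot is lost.

assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1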
| 328 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_a = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert a single PyTorch checkpoint to a TF 2.0 model, optionally checking that both give the same outputs."""
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save the TF model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one or all PyTorch checkpoints listed in MODEL_CLASSES to TF 2.0 models."""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f"    Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
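# Illustrative invocation of this conversion script (the script file name and
# checkpoint names below are placeholders for the example, not taken from the
# code above):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-uncased \
#       --tf_dump_path ./tf_models \
#       --compare_with_pt_model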
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
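# Minimal usage sketch for the pipeline above, kept as comments so the module
# has no import-time side effects (the checkpoint id is an assumption for
# illustration; any score-SDE-VE checkpoint with a matching scheduler works):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-ffhq-1024")
#   image = pipe(num_inference_steps=2000).images[0]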
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that a BPE vocabulary cannot contain.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
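# Quick illustration of the helper above (values worked out by hand, shown as a
# comment so the module has no import-time side effects):
#
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#
# These bigrams are what the BPE loop in the tokenizer below repeatedly merges,
# lowest merge rank first, until no ranked pair remains.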
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
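# Minimal usage sketch for the tokenizer above (downloads the vocab/merges
# files listed in PRETRAINED_VOCAB_FILES_MAP on first use):
#
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tok(" Hello world")["input_ids"]
#   # The sequence ends with tok.eos_token_id, per build_inputs_with_special_tokens above.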
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
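# Minimal usage sketch for the fast tokenizer above:
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("hello world")
#   # input_ids are [CLS] ... [SEP], per build_inputs_with_special_tokens above.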
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs)
return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]

    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
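# Illustrative invocation (the script file name below is an assumption for the
# example; adjust it to wherever this file lives):
#
#   python convert_suno_to_hf.py text ./bark_text_hf --is_small
#
# This converts the small "text" (semantic) checkpoint and writes the resulting
# HF model to ./bark_text_hf.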
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_SCREAMING_SNAKE_CASE = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
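# Usage sketch, mirroring the metric's own docstring (requires the
# `math_equivalence` dependency installed from the hendrycks/math repo):
#
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   # -> {"accuracy": 1.0}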
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress TensorFlow INFO/WARNING log noise
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def SCREAMING_SNAKE_CASE ( self , __A=0 ) -> Dict:
a =floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) )
a =torch.manual_seed(__A )
a ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 | 215 |
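# --- Usage sketch (not part of the test suite above; illustrative only) ---
# Driving the upscale pipeline directly, assuming the public
# "ssube/stable-diffusion-x4-upscaler-onnx" checkpoint and a CPU onnxruntime build;
# a 128x128 input comes back upscaled 4x to 512x512.
#
# from diffusers import OnnxStableDiffusionUpscalePipeline
# from diffusers.utils import load_image
#
# pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#     "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
# )
# low_res = load_image("https://example.com/input.jpg").resize((128, 128))  # placeholder URL
# image = pipe(prompt="A fantasy landscape", image=low_res, num_inference_steps=10).images[0]
# image.save("upscaled.png")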
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 215 | 1 |
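# --- Usage sketch (illustrative) ---
# Constructing the config above with its default ResNet backbone;
# `UperNetForSemanticSegmentation` is the model class this config targets.
# from transformers import UperNetForSemanticSegmentation
#
# config = UperNetConfig()                        # default ResNet backbone
# model = UperNetForSemanticSegmentation(config)
# config.to_dict()["backbone_config"]             # nested backbone config serialized by to_dict()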
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 74 |
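# --- Migration note (illustrative) ---
# The shim above only warns and delegates, so existing call sites keep working;
# new code should import the processor class directly:
# from transformers import YolosImageProcessor
# image_processor = YolosImageProcessor()   # same behavior, no FutureWarning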
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 1 |
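# --- Example invocation (the script name and paths are placeholders) ---
# python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-dump \
#     --hf_config facebook/mbart-large-cc25 --finetuned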
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
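# Quick self-contained check of numpy_to_pil: a random NHWC float array in [0, 1]
# becomes a list of PIL images (only shapes are asserted; the values are arbitrary).
if __name__ == "__main__":
    import numpy as np

    batch = np.random.rand(2, 8, 8, 3)  # NHWC floats in [0, 1]
    pils = numpy_to_pil(batch)
    assert len(pils) == 2 and pils[0].size == (8, 8)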
| 346 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
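# --- Usage sketch (illustrative; requires the vision extras and a VQA checkpoint) ---
# from transformers import pipeline
#
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg",
#     question="How many cats are there?", top_k=2)
# # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": ...}]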
| 346 | 1 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
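# Cross-check of the two generators on a small input; both build the same rows
# (e.g. generate_pascal_triangle(3) -> [[1], [1, 1], [1, 2, 1]]).
if __name__ == "__main__":
    assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)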
| 144 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 0 |
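# --- Dry-run sketch (hypothetical helper, not part of the original script) ---
# The close/stale decision above reduces to two day counts; factoring it out this
# way would let the thresholds be unit-tested without touching the GitHub API:
# from datetime import datetime as dt
#
# def is_inactive(issue_updated_at, issue_created_at, now=None):
#     now = now or dt.utcnow()
#     return (now - issue_updated_at).days > 7 and (now - issue_created_at).days >= 30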
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
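# --- Example invocation (script name and local paths are placeholders) ---
# python convert_mobilevit.py --mobilevit_name mobilevit_s \
#     --checkpoint_path /path/to/mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small
# Adding --push_to_hub uploads the converted weights under the "apple" organization,
# as coded above.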
| 356 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
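# Quick randomized check, assuming non-negative integers (the only inputs bead sort
# supports): the result must match Python's built-in sorted().
if __name__ == "__main__":
    import random

    data = [random.randint(0, 100) for _ in range(20)]
    assert bead_sort(list(data)) == sorted(data)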
| 107 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# Defaults (256 shortest-edge resize, 224 center crop, ImageNet-standard normalization)
# match the MobileNet image processors in transformers.
class MobileNetV1ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 77 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 298 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
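# Worked example, runnable standalone: with voltage=0 the function solves V = P / I,
# and with power=0 it computes P = V * I.
if __name__ == "__main__":
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)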
| 353 |
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
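# Worked example: a 10% nominal annual rate on a principal of 1000, compounded once,
# yields 1000 * ((1 + 0.10)**1 - 1) ~= 100.0 of interest.
if __name__ == "__main__":
    print(compound_interest(1000.0, 0.10, 1))  # ~100.0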
| 286 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
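# --- Usage sketch (illustrative; assumes the MBZUAI/swiftformer-xs weights are available) ---
# from PIL import Image
# from transformers import SwiftFormerForImageClassification, ViTImageProcessor
#
# processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
# model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")  # placeholder image
# predicted_class = model(**inputs).logits.argmax(-1).item()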
| 299 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 241 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
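# --- Usage sketch (illustrative; assumes the CompVis/ldm-text2im-large-256 checkpoint and a GPU) ---
# from diffusers import LDMTextToImagePipeline
#
# pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
# image = pipe("A painting of a squirrel eating a burger",
#              num_inference_steps=50, guidance_scale=6.0).images[0]
# image.save("squirrel.png")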
| 42 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = '''left'''
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
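    # Illustrative walk-through of the branch above (assumed SentencePiece behavior):
    # a piece like "2014," is re-encoded without its trailing comma, so the digits
    # and the "," end up as separate pieces instead of one fused "2014," token.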
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
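    # Worked example for the method above: with two tokens in the first sequence and
    # one in the second, the type ids are [0, 0, 0, 1, 1, 2] — sequence A plus its
    # <sep> get segment 0, sequence B plus its <sep> get segment 1, and the trailing
    # <cls> gets XLNet's special segment id 2.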
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 42 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
_a = logging.getLogger(__name__)
_a = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs abstractive summarization model."""

    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
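# Minimal usage sketch (illustrative, not part of the original module):
#
#     config = BertAbsConfig(dec_layers=8, dec_dropout=0.1)
#     assert config.dec_layers == 8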
| 61 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
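# Worked example (illustrative): on a sorted array of n = 16 elements, the first
# loop probes indices 3, 7, 11, 15 in jumps of floor(sqrt(16)) = 4, then the second
# loop scans linearly inside the block that may contain x, so the total work is
# O(sqrt(n)) — between linear and binary search.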
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| 45 | 0 |
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
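# Math behind is_pentagonal (Project Euler 44): P(n) = n(3n - 1) / 2, so inverting
# the quadratic gives n = (1 + sqrt(24x + 1)) / 6, and x is pentagonal exactly when
# that n is a whole number. E.g. x = 22: sqrt(24 * 22 + 1) = 23 and (1 + 23) / 6 = 4,
# matching P(4) = 22.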
if __name__ == "__main__":
print(f'''{solution() = }''') | 363 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
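# e.g. to_atuple(224) -> (224, 224), while an existing iterable such as (224, 192)
# is returned unchanged — the same role the `to_2tuple` helper plays upstream for
# image and patch sizes.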
@require_flax
class lowerCAmelCase :
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = after_output[0]
__lowercase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
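        # Worked example (illustrative numbers): a 224x224 image with 16x16 patches
        # gives (224 // 16) * (224 // 16) = 196 patches, hence seq_len = 196 + 1 = 197.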
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__a )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__a )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**__a )
__lowercase : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__a )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Optional[int] = model_a(**__a )
__lowercase : Tuple = after_outputs[0]
__lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : int = 13
__lowercase : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : Tuple = random_attention_mask([batch_size, 4] )
__lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxViTModel(__a )
__lowercase : List[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = FlaxViTModelTester(self )
__lowercase : str = FlaxBertModelTester(self )
__lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
__lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Optional[int] = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : Tuple = 13
__lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : List[Any] = random_attention_mask([batch_size, 4] )
__lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = FlaxCLIPVisionModel(__a )
__lowercase : Optional[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
__lowercase : Optional[Any] = FlaxBertModelTester(self )
__lowercase : Any = clip_model_tester.prepare_config_and_inputs()
__lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Dict = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
__lowercase : Optional[int] = model(**__a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) ) | 306 | 0 |
from collections import namedtuple
__snake_case :Tuple = namedtuple('''from_to''', '''from_ to''')
__snake_case :Dict = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
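# Worked example using the table above: 2 cubic meters to litres is
# 2 * METRIC_CONVERSION["cubicmeter"].from_ * METRIC_CONVERSION["litre"].to
# = 2 * 1 * 1000 = 2000 litres.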
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
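# Worked example: optimal_merge_pattern([2, 3, 4]) merges (2, 3) at cost 5, then
# (5, 4) at cost 9, returning 5 + 9 = 14 — the greedy "always merge the two smallest
# files" strategy, Huffman-style.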
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 359 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
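# Illustrative mapping produced by the decoder patterns above, applied in order
# (assumed TF checkpoint layout):
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"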
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    # position embeddings are shared by encoder and decoder (target keys assumed from the upstream script)
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 342 | 0 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    # class/method names restored to match the upstream diffusers IF test mixin (assumed)
    def _get_dummy_components(self):
torch.manual_seed(0 )
__a = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=_a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
__a = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
torch.manual_seed(0 )
__a = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=_a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__a = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
__a = self.get_dummy_components()
__a = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__a = self.get_dummy_inputs(_a )
__a = inputs['''prompt''']
__a = inputs['''generator''']
__a = inputs['''num_inference_steps''']
__a = inputs['''output_type''']
if "image" in inputs:
__a = inputs['''image''']
else:
__a = None
if "mask_image" in inputs:
__a = inputs['''mask_image''']
else:
__a = None
if "original_image" in inputs:
__a = inputs['''original_image''']
else:
__a = None
__a , __a = pipe.encode_prompt(_a )
# inputs with prompt converted to embeddings
__a = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
__a = image
if mask_image is not None:
__a = mask_image
if original_image is not None:
__a = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_a , _a , _a )
__a = pipe(**_a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_a )
__a = self.pipeline_class.from_pretrained(_a )
pipe_loaded.to(_a )
pipe_loaded.set_progress_bar_config(disable=_a )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_a , _a ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
__a = self.get_dummy_inputs(_a )
__a = inputs['''generator''']
__a = inputs['''num_inference_steps''']
__a = inputs['''output_type''']
# inputs with prompt converted to embeddings
__a = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
__a = image
if mask_image is not None:
__a = mask_image
if original_image is not None:
__a = original_image
__a = pipe_loaded(**_a )[0]
__a = np.abs(to_np(_a ) - to_np(_a ) ).max()
self.assertLess(_a , 1E-4 )
    def _test_save_load_local(self):
__a = self.get_dummy_components()
__a = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__a = self.get_dummy_inputs(_a )
__a = pipe(**_a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_a )
__a = self.pipeline_class.from_pretrained(_a )
pipe_loaded.to(_a )
pipe_loaded.set_progress_bar_config(disable=_a )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__a = self.get_dummy_inputs(_a )
__a = pipe_loaded(**_a )[0]
__a = np.abs(to_np(_a ) - to_np(_a ) ).max()
self.assertLess(_a , 1E-4 )
| 45 |
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
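# e.g. join("-", ["a", "b", "c"]) -> "a-b-c" (the trailing separator is stripped).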
if __name__ == "__main__":
from doctest import testmod
testmod()
| 45 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_SCREAMING_SNAKE_CASE : Any = 16
_SCREAMING_SNAKE_CASE : Tuple = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
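# Minimal usage sketch (assumes an Accelerator has been constructed):
#
#     accelerator = Accelerator()
#     train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#     # each batch is a dict with input_ids, attention_mask, token_type_ids, labels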
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument("--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument("--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument("--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.")
    parser.add_argument("--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.")
    parser.add_argument("--num_epochs", type=int, default=2, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 360 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset yielding 0, 1, 2, ... and stopping at random (name assumed)
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
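    # Illustrative stream (probabilistic): with p_stop=0.01 the dataset yields
    # 0, 1, 2, ... until a ~1% per-step coin flip says stop, capped at max_length.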
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
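    # Worked example (mirrors the cases below): sharding BatchSampler(range(24),
    # batch_size=3) across 2 processes gives process 0 the batches [0, 1, 2],
    # [6, 7, 8], [12, 13, 14], [18, 19, 20] and process 1 the interleaved rest —
    # whole batches alternate between processes rather than each batch being split.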
    def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and yields
        # a number of batches that is not a multiple of num_processes.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
SCREAMING_SNAKE_CASE__ = [BatchSamplerShard(__lowerCamelCase , 2 , __lowerCamelCase , even_batches=__lowerCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple=False , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Dict=False ) -> str:
random.seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
IterableDatasetShard(
__lowerCamelCase , batch_size=__lowerCamelCase , drop_last=__lowerCamelCase , num_processes=__lowerCamelCase , process_index=__lowerCamelCase , split_batches=__lowerCamelCase , )
for i in range(__lowerCamelCase )
]
SCREAMING_SNAKE_CASE__ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCamelCase )
iterable_dataset_lists.append(list(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
SCREAMING_SNAKE_CASE__ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(len(__lowerCamelCase ) % shard_batch_size == 0 )
SCREAMING_SNAKE_CASE__ = []
for idx in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCamelCase ) < len(__lowerCamelCase ):
reference += reference
self.assertListEqual(__lowerCamelCase , reference[: len(__lowerCamelCase )] )
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
# Edge case with a very small dataset
SCREAMING_SNAKE_CASE__ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = SkipBatchSampler(__lowerCamelCase , 2 )
self.assertListEqual(list(__lowerCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = DataLoader(list(range(16 ) ) , batch_size=4 )
SCREAMING_SNAKE_CASE__ = skip_first_batches(__lowerCamelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowercase_ ( self : Union[str, Any] ) -> str:
Accelerator()
SCREAMING_SNAKE_CASE__ = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
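# --- Editor's illustration (added; not part of the original test suite) ---
# A minimal sketch of the sharding behaviour asserted above: with two processes, each
# shard yields every other batch of the wrapped sampler. Assumes the
# `BatchSamplerShard(batch_sampler, num_processes, process_index)` signature from
# `accelerate.data_loader`, which the tests above already exercise.
if __name__ == "__main__":
    from torch.utils.data import BatchSampler as _BatchSampler
    from accelerate.data_loader import BatchSamplerShard as _BatchSamplerShard
    _sampler = _BatchSampler(range(8), batch_size=2, drop_last=False)
    _shards = [_BatchSamplerShard(_sampler, 2, i) for i in range(2)]
    print(list(_shards[0]))  # expected: [[0, 1], [4, 5]]
    print(list(_shards[1]))  # expected: [[2, 3], [6, 7]]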
| 218 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
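# --- Editor's note (added) ---
# What the `_LazyModule` indirection above buys: the heavy submodule is only imported
# when one of its attributes is first accessed, so importing the package stays cheap.
# Hypothetical access pattern, assuming the package layout above:
#
#     from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
#     # the real `tokenization_wav2vec2_phoneme` module is imported only at this point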
| 16 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 286 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns per-channel bit planes in {-1, +1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits_out = ((x & mask) != 0).float()
    bits_out = rearrange(bits_out, "b c d h w -> b (c d) h w")
    bits_out = bits_out * 2 - 1
    return bits_out
def bits_to_decimal(x, bits=BITS):
    """Expects bit planes in {-1, +1}; returns an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
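# --- Editor's note (added) ---
# A round-trip sanity check for the two helpers above (names follow this file):
#
#     x = torch.rand(1, 3, 4, 4)          # image in [0, 1]
#     b = decimal_to_bits(x)              # shape (1, 24, 4, 4), values in {-1, +1}
#     y = bits_to_decimal(b)              # back in [0, 1], quantised to 1/255 steps
#     assert torch.allclose(y, (x * 255).int().float() / 255, atol=1e-6)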
def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-depth understanding
    # Notation: <variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_snake_case = self.alphas_cumprod[timestep]
_snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_snake_case = model_output.device if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else """cpu"""
_snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
_snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise
_snake_case = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ):
_snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_snake_case, _snake_case = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 )
else:
_snake_case = None
# 1. compute alphas, betas
_snake_case = self.alphas_cumprod[t]
_snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
_snake_case = 1 - alpha_prod_t
_snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_snake_case = 0
if t > 0:
_snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device )
_snake_case = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise
_snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> Union[str, Any]:
super().__init__()
_snake_case = bit_scale
_snake_case = (
ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__(self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]:
_snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , )
_snake_case = decimal_to_bits(UpperCAmelCase ) * self.bit_scale
_snake_case = latents.to(self.device )
self.scheduler.set_timesteps(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_snake_case = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
_snake_case = bits_to_decimal(UpperCAmelCase )
if output_type == "pil":
_snake_case = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase ) | 270 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
    'The `inpainting.py` script is outdated. Please use '
    '`from diffusers import StableDiffusionInpaintPipeline` directly instead.'
)
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns per-channel bit planes in {-1, +1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b c h w -> b c 1 h w')
    bits_out = ((x & mask) != 0).float()
    bits_out = rearrange(bits_out, 'b c d h w -> b (c d) h w')
    bits_out = bits_out * 2 - 1
    return bits_out
def bits_to_decimal(x, bits=BITS):
    """Expects bit planes in {-1, +1}; returns an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b (c d) h w -> b c d h w', d=8)
    dec = reduce(x * mask, 'b c d h w -> b c h w', 'sum')
    return (dec / 255).clamp(0.0, 1.0)
def SCREAMING_SNAKE_CASE__ ( self , __A , __A , __A , __A = 0.0 , __A = True , __A=None , __A = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-depth understanding
    # Notation: <variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_snake_case = self.alphas_cumprod[timestep]
_snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(__A , -scale , __A )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_snake_case = self._get_variance(__A , __A )
_snake_case = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_snake_case = model_output.device if torch.is_tensor(__A ) else 'cpu'
_snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__A ).to(__A )
_snake_case = self._get_variance(__A , __A ) ** 0.5 * eta * noise
_snake_case = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
def SCREAMING_SNAKE_CASE__ ( self , __A , __A , __A , __A="epsilon" , __A=None , __A = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_snake_case , _snake_case = torch.split(__A , sample.shape[1] , dim=1 )
else:
_snake_case = None
# 1. compute alphas, betas
_snake_case = self.alphas_cumprod[t]
_snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
_snake_case = 1 - alpha_prod_t
_snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(__A , -scale , __A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_snake_case = 0
if t > 0:
_snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device )
_snake_case = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise
_snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1.0 , ):
"""simple docstring"""
super().__init__()
_snake_case = bit_scale
_snake_case = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 2_56 , lowerCAmelCase_ = 2_56 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_snake_case = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_snake_case = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_snake_case = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_snake_case = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_snake_case = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 42 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
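# Example (editor's addition): evaluate("Helxo Worlx", "Hello World") == ("Helxo Worlx", 9.0)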
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations, just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]
        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also compute small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 42 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
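# --- Editor's sketch (added) ---
# Hypothetical usage of the task template above, assuming the `datasets` task API:
#
#     template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
#     template.column_mapping
#     # {"q": "question", "ctx": "context", "answers": "answers"}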
| 200 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile via a personal access token."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 200 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__ ( unittest.TestCase):
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : str = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : List[str] = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE : str = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[int] = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __A ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='''aaaaaa''' )
def __A ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __A ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Any = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self : Any ):
'''simple docstring'''
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCamelCase__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
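# --- Editor's sketch (added) ---
# The register/cleanup pattern the tests above exercise, in isolation. Assumes the
# public `AutoConfig.register` / `AutoImageProcessor.register` API already used above:
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     processor = AutoImageProcessor.from_pretrained(saved_dir)  # -> CustomImageProcessor
#     del CONFIG_MAPPING._extra_content["custom"]                # cleanup, as in the tests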
| 182 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using float sqrt (fast, but can misbehave for very large ints)."""
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using an integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
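# Editor's note (added) — quick sanity checks for the two variants above:
#     perfect_square(9)                  # True
#     perfect_square_binary_search(10)   # False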
| 182 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
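# Editor's note (added) — hypothetical CLI invocation via `fire`, with argument names
# taken from the signature above (the script filename is assumed):
#
#     python convert_model_to_fp16.py --src_path pytorch_model.bin --save_path model_fp16.bin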
| 124 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(x_train, y_train, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its `k` nearest training neighbours."""
    data = zip(x_train, y_train)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
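# Editor's note (added) — a tiny hypothetical example: with k=1 the vote degenerates to
# the single nearest neighbour, e.g.
#     classifier([[0, 0], [1, 1]], [0, 1], ['a', 'b'], [0.1, 0.1], k=1) == 'a'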
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 124 | 1 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) = gcd(b mod a, a)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
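# Editor's note (added): greatest_common_divisor(12, 18) == 6 == gcd_by_iterative(12, 18)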
def main():
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'greatest_common_divisor({num_1}, {num_2}) = '
            f'{greatest_common_divisor(num_1, num_2)}'
        )
        print(f'By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')
if __name__ == "__main__":
main()
| 178 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=64 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = vocab_size - 1
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> int:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self , a , a , a ) -> Optional[int]:
snake_case_ = GPTNeoXModel(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
snake_case_ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a ) -> List[str]:
snake_case_ = True
snake_case_ = GPTNeoXModel(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> int:
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> Optional[int]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForQuestionAnswering(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , a , a , a , a ) -> List[str]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForSequenceClassification(a )
model.to(a )
model.eval()
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , a , a , a , a ) -> str:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForTokenClassification(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , a , a , a ) -> List[Any]:
snake_case_ = True
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
snake_case_ = model(a , attention_mask=a , use_cache=a )
snake_case_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(a , attention_mask=a , output_hidden_states=a )
snake_case_ = output_from_no_past['hidden_states'][0]
snake_case_ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> str:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> str:
snake_case_ = GPTNeoXModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> str:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def _UpperCamelCase ( self ) -> List[str]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _UpperCamelCase ( self , a ) -> int:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ids_tensor([1, 10] , config.vocab_size )
snake_case_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = GPTNeoXModel(a )
original_model.to(a )
original_model.eval()
snake_case_ = original_model(a ).last_hidden_state
snake_case_ = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = {'type': scaling_type, 'factor': 10.0}
snake_case_ = GPTNeoXModel(a )
scaled_model.to(a )
scaled_model.eval()
snake_case_ = scaled_model(a ).last_hidden_state
snake_case_ = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
snake_case_ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(a )
snake_case_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case_ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
snake_case_ = model.generate(**a , do_sample=a , max_new_tokens=20 )
snake_case_ = tokenizer.batch_decode(a )[0]
self.assertEqual(a , a )
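# --- Editor's sketch (added) ---
# The RoPE-scaling configuration the parameterized test above feeds the model, shown in
# isolation. Assumes the `rope_scaling` dict format used by recent `transformers` releases:
#
#     config.rope_scaling = {"type": "linear", "factor": 10.0}
#     model = GPTNeoXModel(config)  # rotary positions are interpolated by 1/factor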
| 178 | 1 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )
    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
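    # Worked example (illustrative, not from the original file): travel days
    # [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] are covered cheapest by
    # a 1-day pass on day 1, a 7-day pass for days 4-10 and a 1-day pass on
    # day 20, for a total cost of 11.
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11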
| 369 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup, )
        print(f"timeit() runs in {timing} seconds")
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
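    # Cross-check (an added sanity test, assuming Python 3.10+ for
    # int.bit_count): both implementations agree with the built-in popcount.
    for n in (25, 37, 58, 0):
        assert (
            get_set_bits_count_using_brian_kernighans_algorithm(n)
            == get_set_bits_count_using_modulo_operator(n)
            == n.bit_count()
        )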
| 183 | 0 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
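    # Sanity check against the worked example in the Project Euler 116
    # statement (this DP matches that problem's setting): a row of length
    # five admits 7 red, 3 green and 2 blue tilings, 12 in total.
    assert solution(5) == 12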
| 257 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 257 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
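if __name__ == "__main__":
    # Usage sketch (an illustration, not part of the original module; assumes
    # network access to download the `facebook/xglm-564M` checkpoint).
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    print(tokenizer("Hello world!")["input_ids"])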
| 366 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
def get_all_tweets(screen_name: str) -> None:
    '''simple docstring'''
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 243 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics(self):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 0 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    '''
    Change the brightness of a PIL Image to a given level.
    '''
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
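# Worked example: with level = 100, an input channel value c = 100 maps to
# 128 + 100 + (100 - 128) = 200; Image.point clamps results to the 0-255 range.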
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 72 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
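# Note on the accelerate test above: `device_map='auto'` lets Accelerate place
# the checkpoint's layers across the available devices, so the test only
# checks the predicted ImageNet label rather than exact logits.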
| 72 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
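# Layout built by the methods above (schematic, not real vocabulary ids): a
# single sequence becomes `[CLS] A [SEP]` with token type ids all 0, and a
# pair becomes `[CLS] A [SEP] B [SEP]` with the B segment marked 1.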
| 106 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
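# Usage sketch (illustrative path): deduplicates shared initializers in place
# and writes `optimized_<name>.onnx` next to the input, returning its path.
#     new_path = remove_dup_initializers("model.onnx")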
| 295 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    '''simple docstring'''
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
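# For intuition (a pure-Python sketch, independent of the module under test):
# the 0.85 threshold above acts as a Jaccard-similarity cutoff that MinHash
# approximates. "a " * 20 and "a " * 30 tokenize to identical token sets, so
# their Jaccard similarity is 1.0 and they land in the same duplicate cluster.
def jaccard(a: str, b: str) -> float:
    set_a, set_b = set(a.split()), set(b.split())
    return len(set_a & set_b) / len(set_a | set_b)
assert jaccard("a " * 20, "a " * 30) == 1.0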
| 106 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 106 | 1 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
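# Usage sketch (illustrative file names): compares two line-aligned text files
# and optionally saves the scores as JSON.
#     python rouge_cli.py preds.txt targets.txt --save_path rouge.json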
| 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """simple docstring"""
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='BEiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
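# Note on the post-processing checked above: without `target_sizes`,
# post_process_semantic_segmentation returns maps at the model's logit
# resolution (160x160 here); passing target_sizes=[(500, 300)] resizes each
# predicted map to the requested (height, width).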
| 334 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        """simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
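
# Minimal usage sketch (hypothetical, since this module is normally imported as
# part of the `transformers` package rather than run directly):
#   from transformers import InformerConfig
#   config = InformerConfig(prediction_length=24, context_length=48)
#   assert config.hidden_size == config.d_model  # resolved via attribute_map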
| 353 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
"""simple docstring"""
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
"""simple docstring"""
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
"""simple docstring"""
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
"""simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
"""simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
"""simple docstring"""
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        """simple docstring"""
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
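    # TorchScript round-trip check: trace the model on CPU, save the traced
    # graph, reload it mapped onto the GPU device, and run it there to verify
    # the exported module is device-portable.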
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
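
# A note on running this suite (assuming a `transformers` source checkout):
#   python -m pytest tests/models/distilbert/test_modeling_distilbert.py
# The @slow tests (which download pretrained weights) only run when the
# RUN_SLOW=1 environment variable is set.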
| 162 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        '''simple docstring'''
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        '''simple docstring'''
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py')
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py')
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py')
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
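
# Example invocation (hypothetical paths; requires `pip install -e .[modelcreation]`
# inside a `transformers` checkout):
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file=path/to/standalone.json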
| 49 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answer = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
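
# The DP recurrence: for each i, try every square j**2 <= i and take
#   answers[i] = 1 + min(answers[i - j**2]),
# i.e. one square plus the best decomposition of the remainder. For example,
# minimum_squares_to_represent_a_number(12) == 3, since 12 = 4 + 4 + 4.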
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
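
    # The padding checks below expect ValueError: OpenAI GPT's tokenizer does
    # not define a padding token, so `padding="max_length"` cannot succeed.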
    def test_padding(self, max_length=15):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_different_model_input_name(self):
        """simple docstring"""
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """simple docstring"""

    pass
| 245 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
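
# Classic house-robber recurrence: at each element we either take it (previous
# best that excluded its neighbour, plus num) or skip it (best so far).
# For example, maximum_non_adjacent_sum([1, 2, 3]) == 4, picking 1 and 3.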
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 1 |