""" Whisper model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51_865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1_536,
        encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50_257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1_500,
        max_target_positions=448,
        pad_token_id=50_256,
        bos_token_id=50_256,
        eos_token_id=50_256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
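
# Usage sketch (hedged): assumes the classes above are exposed through the
# public `transformers` package, as they are in released versions; the printed
# values follow directly from the defaults defined above.
if __name__ == "__main__":
    from transformers import WhisperConfig

    config = WhisperConfig()
    print(config.d_model, config.encoder_layers)  # 256 6
    print(config.num_hidden_layers)  # alias populated from encoder_layers: 6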
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
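
# Self-contained sketch of what the iterative branch of `_compute` above does,
# using `jiwer` directly (no `datasets` involved); inputs mirror the docstring
# example, and the result matches its 0.5.
if __name__ == "__main__":
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    incorrect, total = 0, 0
    for prediction, reference in zip(predictions, references):
        measures = compute_measures(reference, prediction)  # truth first, hypothesis second
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total)  # 0.5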
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
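
# The `_import_structure` dict above only names symbols; `_LazyModule` defers
# the heavy submodule imports until an attribute is first accessed. A hedged
# sketch of the observable behavior from user code:
#
#     import transformers.models.whisper as whisper
#     config_cls = whisper.WhisperConfig   # triggers import of configuration_whisper
#     model_cls = whisper.WhisperModel     # triggers import of modeling_whisper (needs torch)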
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
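
# Note on the scheduler-swap idiom used by the fast tests above: any diffusers
# scheduler can be rebuilt from another scheduler's config, e.g.
#
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
#
# which is what lets one tiny ONNX checkpoint exercise PNDM, LMS, Euler,
# Euler-ancestral, and DPM-multistep without re-downloading any weights.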
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
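
# Worked examples for the solver above (each equation is given as [a, b, c]
# for a*x + b*y = c):
if __name__ == "__main__":
    # x + 2y = 10 and 3x + 4y = 24  =>  x = 4, y = 3
    print(cramers_rule_2x2([1, 2, 10], [3, 4, 24]))  # (4.0, 3.0)
    # 2x + 3y = 0 and 5x + y = 0 has only the trivial solution
    print(cramers_rule_2x2([2, 3, 0], [5, 1, 0]))  # (0.0, 0.0)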
def partition(m: int) -> int:
    """Return the number of integer partitions of `m` (e.g. partition(4) == 5)."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__a = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
__a = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
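
# Hedged sketch of the round trip these tests exercise (the file name is
# illustrative, not a fixture from the test suite):
#
#     from datasets import Dataset
#     from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#
#     ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#     ParquetDatasetWriter(ds, "example.parquet").write()
#     reloaded = ParquetDatasetReader("example.parquet").read()
#     assert reloaded.column_names == ["col_1", "col_2", "col_3"]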
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
""" SegFormer model configuration"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def lowerCamelCase__ ( self , a_ = "auto" ) ->str:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def lowerCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
self.enable_attention_slicing(a_ )
def lowerCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_a = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(a_ , a_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        text,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        # Use CLIPSeg to predict a segmentation mask from the `text` prompt
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
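
# --- Hedged usage sketch (not part of the original file) ---
# A minimal way to drive this text-guided inpainting pipeline; the checkpoint
# names and input image below are illustrative assumptions, not fixed choices.
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#   from PIL import Image
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=seg_model,
#       segmentation_processor=processor,
#   )
#   image = Image.open("photo.png").convert("RGB").resize((512, 512))
#   result = pipe(prompt="a red couch", image=image, text="the sofa").images[0]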
| 612 |
"""simple docstring"""
def lowerCAmelCase ( UpperCamelCase_: Optional[int] , UpperCamelCase_: str ) -> List[Any]:
'''simple docstring'''
_a = (boundary[1] - boundary[0]) / steps
_a = boundary[0]
_a = boundary[1]
_a = make_points(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_a = 0.0
y += (h / 2.0) * f(UpperCamelCase_ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase_ )
y += (h / 2.0) * f(UpperCamelCase_ )
return y
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Any ) -> str:
'''simple docstring'''
_a = a + h
while x < (b - h):
yield x
_a = x + h
def lowerCAmelCase ( UpperCamelCase_: List[str] ) -> int: # enter your function here
'''simple docstring'''
_a = (x - 0) * (x - 0)
return y
def lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
_a = 0.0 # Lower bound of integration
_a = 1.0 # Upper bound of integration
_a = 10.0 # define number of steps or resolution
_a = [a, b] # define boundary of integration
_a = method_a(UpperCamelCase_ , UpperCamelCase_ )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
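    # Quick sanity check (values derived from the code above): with f(x) = x**2
    # on [0, 1] and steps = 10.0, the estimate is ~0.335 versus the exact
    # integral 1/3, and the error shrinks as `steps` grows.
    assert abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 2e-3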
| 612 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """
    A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects that applies each one in turn to a
    `scores` tensor via its `__call__` method.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] that performs nucleus (top-p) filtering on the logits."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that enforces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that forces specified tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""Whisper-specific processor enforcing the rules of timestamp token generation."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
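
# --- Hedged usage sketch (not part of the original file) ---
# A tiny, self-contained demonstration of chaining two of the warpers above on
# dummy logits; the batch size, vocab size, and parameter values are arbitrary.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
    )
    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    dummy_scores = jnp.array([[1.0, 3.0, 2.0, 0.5]])
    filtered = processors(dummy_input_ids, dummy_scores, cur_len=4)
    print(filtered)  # all but the two highest logits are pushed to -inf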
| 481 |
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a breadth first search starting from the source vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return a string describing the shortest path from source to target."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 481 | 1 |
"""simple docstring"""
__A : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase ( UpperCamelCase : bytes ):
"""simple docstring"""
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCamelCase , UpperCamelCase ):
A__ : Union[str, Any] =F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCamelCase )
A__ : Optional[Any] ="".join(bin(UpperCamelCase )[2:].zfill(8 ) for byte in data )
A__ : Any =len(UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
A__ : Dict =B"=" * ((6 - len(UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCamelCase ) % 6)
else:
A__ : str =B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCamelCase ) , 6 ) ).encode()
+ padding
)
def lowercase ( UpperCamelCase : str ):
"""simple docstring"""
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(UpperCamelCase , UpperCamelCase ):
A__ : Optional[Any] =(
"argument should be a bytes-like object or ASCII string, "
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCamelCase , UpperCamelCase ):
try:
A__ : Optional[Any] =encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
A__ : Any =encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A__ : Dict =encoded_data[:-padding]
A__ : Optional[int] ="".join(
bin(B64_CHARSET.index(UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A__ : Optional[Any] ="".join(
bin(B64_CHARSET.index(UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
A__ : Optional[int] =[
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCamelCase ) , 8 )
]
return bytes(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
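    # Round-trip sanity check: "SGVsbG8sIFdvcmxkIQ==" is the standard Base64
    # encoding of b"Hello, World!", so encoding and decoding invert each other.
    assert base64_encode(b"Hello, World!") == b"SGVsbG8sIFdvcmxkIQ=="
    assert base64_decode("SGVsbG8sIFdvcmxkIQ==") == b"Hello, World!"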
| 595 | """Convert integers to Roman numerals and back."""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Given a Roman numeral, convert it to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Given an integer, convert it to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
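    # Worked example: "MMXXIV" = 1000 + 1000 + 10 + 10 + (5 - 1) = 2024, and the
    # two conversions are inverses of each other on canonical numerals.
    assert roman_to_int("MMXXIV") == 2024
    assert int_to_roman(2024) == "MMXXIV"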
| 595 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the item located at `_loader_batch_index` within the current `_loader_batch_data`."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism, but
        # with an extra required key, `is_last`, used to regroup items that were
        # flattened by `PipelineChunkIterator` back into their original batches.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 26 |
def circle_sort(collection: list) -> list:
    """Returns the list sorted in ascending order via the circle sort algorithm."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )

            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
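    # Doctest-style example (no input needed), derived from the algorithm above:
    # >>> circle_sort([5, 1, 4, 2, 3])
    # [1, 2, 3, 4, 5]
    # >>> circle_sort([])
    # []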
| 618 | 0 |
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 100 )-> int:
_SCREAMING_SNAKE_CASE : Any = set()
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Tuple = n + 1 # maximum limit
for a in range(2 , __SCREAMING_SNAKE_CASE ):
for b in range(2 , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = a**b # calculates the current power
collect_powers.add(__SCREAMING_SNAKE_CASE ) # adds the result to the set
return len(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
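    # Known results for this Project Euler problem (number 29):
    # solution(5) == 15 and solution(100) == 9183.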
| 635 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames the fairseq MusicGen state dict to HF module names and splits off
    the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
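
    # Example invocation (the script filename is an illustrative assumption):
    #   python convert_musicgen_transformers.py --checkpoint small \
    #       --pytorch_dump_folder ./musicgen-small --device cpu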
| 635 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
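    # The call above prints, line by line:
    #   5 * 1 = 5
    #   5 * 2 = 10
    #   ...
    #   5 * 10 = 50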
| 699 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pythonic implementation of an n-dimensional vector."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        """Vector addition."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Vector subtraction."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication or, for two vectors, the dot product."""
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        """Returns a copy of the vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Returns the i-th component of the vector."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Changes a component of the vector in place."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Returns the euclidean length of the vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Returns the angle between self and other."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
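
# --- Hedged usage example (not part of the original file) ---
# Basic vector arithmetic with the class above; the values are arbitrary.
# >>> v = Vector([1, 2, 3])
# >>> w = Vector([4, 5, 6])
# >>> str(v + w)
# '(5,7,9)'
# >>> v * w  # dot product
# 32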
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of size `dimension`."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector with a one at index `pos` (indexing at 0)."""
    # precondition
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes the axpy operation: scalar * x + y."""
    # precondition
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a random vector of size n with integer components between a and b."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A pythonic implementation of a matrix of size width x height."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        """Matrix addition."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Matrix subtraction."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
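
# --- Hedged usage example (not part of the original file) ---
# A 2x2 determinant computed with the Matrix class above; values are arbitrary.
# >>> m = Matrix([[1, 2], [3, 4]], 2, 2)
# >>> m.determinant()
# -2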
def square_zero_matrix(n: int) -> Matrix:
    """Returns a square zero-matrix of dimension n x n."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a random matrix of size width x height with integer components
    between a and b."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 686 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the alphanumeric fraction of the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1) looking for keywords in the first few lines of the file, and
    2) counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics; config/test and keyword-free files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
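
# --- Hedged usage example (not part of the original script) ---
# The per-example heuristics operate on plain dicts, so they can be tried out
# directly; the sample content below is an arbitrary assumption.
# >>> example = {"content": "x = 1\n" * 10}
# >>> is_autogenerated(example)
# {'autogenerated': False}
# >>> has_no_keywords(example)
# {'has_no_keywords': True}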
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 686 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class lowercase_ ( UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = True
__snake_case = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
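
# A minimal usage sketch (illustrative, outside the unittest runner; the dummy
# `parent=None` works here because the parent is only consulted by assertion
# helpers this snippet never calls): build a config plus matching inputs with
# the tester and smoke-test the base model.
if __name__ == "__main__":
    tester = FlaxRobertaModelTester(parent=None)
    config, inputs_dict = tester.prepare_config_and_inputs_for_common()
    model = FlaxRobertaModel(config)
    outputs = model(**inputs_dict)
    print(outputs.last_hidden_state.shape)  # (batch_size, seq_length, hidden_size)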
| 117 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
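
# Self-contained sketch of the early-exit mechanism above (synthetic config and
# random, untrained weights -- the classifier list is a stand-in for the heads a
# fine-tuned PABEE model would carry): with patience p, inference stops as soon
# as p consecutive internal classifiers agree on the prediction.
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=64)
    model = BertModelWithPabee(config).eval()
    classifiers = nn.ModuleList([nn.Linear(config.hidden_size, 2) for _ in range(config.num_hidden_layers)])
    model.set_patience(2)
    model.reset_stats()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        logits = model(input_ids=input_ids, output_layers=classifiers, regression=False)[0]
    model.log_stats()  # reports the average number of layers actually evaluated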
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 691 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
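
# For illustration (hypothetical frames): make_batched normalizes every accepted
# layout to a list of videos, each video being a list of frames.
#   frame = np.zeros((32, 32, 3), dtype=np.uint8)
#   make_batched(frame)              -> [[frame]]           a single image
#   make_batched([frame, frame])     -> [[frame, frame]]    one video
#   make_batched([[frame], [frame]]) -> unchanged           a batch of videos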
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
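
# Quick self-check sketch (synthetic video; assumes the default 224x224 pipeline
# configured above): eight RGB frames are resized, center-cropped, rescaled and
# normalized into one batched array.
if __name__ == "__main__":
    video = [np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8) for _ in range(8)]
    processor = VideoMAEImageProcessor()
    out = processor(video, return_tensors="np")
    print(out["pixel_values"].shape)  # (1, 8, 3, 224, 224)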
| 701 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
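
# Round-trip sketch (random data): a batch of two fake "model outputs" in
# [-1, 1] becomes a list of two 64x64 PIL images.
if __name__ == "__main__":
    import torch

    fake_images = torch.rand(2, 3, 64, 64) * 2 - 1
    pils = pt_to_pil(fake_images)
    print(len(pils), pils[0].size)  # 2 (64, 64)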
| 352 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
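
# Example run: search a text for every keyword at once; matches report the
# start index of each occurrence.
if __name__ == "__main__":
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))
    # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}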
| 28 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
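
# Usage sketch: the default configuration mirrors EfficientNet-B7; the derived
# hidden-layer count follows the sum(num_block_repeats) * 4 rule above.
if __name__ == "__main__":
    config = EfficientNetConfig()
    print(config.image_size, config.num_hidden_layers)  # 600 64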
| 277 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
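
# For illustration (toy tensors): rename_key moves an entry to its new name
# in place, e.g.
#   sd = {"transformer.decoder.norm.weight": w}
#   rename_key(sd, "transformer.decoder.norm.weight", "decoder.layernorm.weight")
#   # sd == {"decoder.layernorm.weight": w}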
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 60 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
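
if __name__ == "__main__":
    # Hand-computation sketch: for a single sentence pair, CER reduces to the
    # character-level Levenshtein distance (S + D + I) divided by the reference
    # length (S + D + C), matching the jiwer-based measure above.
    def levenshtein(ref: str, hyp: str) -> int:
        prev = list(range(len(hyp) + 1))
        for i, r in enumerate(ref, 1):
            curr = [i]
            for j, h in enumerate(hyp, 1):
                curr.append(min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + (r != h)))
            prev = curr
        return prev[-1]

    reference, prediction = "there is another one", "there is an other sample"
    print(levenshtein(reference, prediction) / len(reference))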
| 60 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)

        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 462 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCAmelCase = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
lowerCAmelCase = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
lowerCAmelCase = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 462 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the distance in metres between two points on the surface of
    earth using Lambert's formula for long lines, given their latitudes and
    longitudes in degrees."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
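# A minimal usage sketch (the coordinates below are assumed values for San
# Francisco and Yosemite; the distance comes out around 254 km, in metres):
# >>> lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)  # doctest: +SKIP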
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 196 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
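# A minimal usage sketch (the feature spec and output path are illustrative
# assumptions, not part of the benchmark suite):
# features = datasets.Features({"text": datasets.Value("string")})
# ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)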
| 559 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
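# Note that comment-only lines are stripped before hashing, so, illustratively,
# _hash_python_lines(["x = 1", "# a comment"]) == _hash_python_lines(["x = 1"])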
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 559 | 1 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right cell,
    moving in the four cardinal directions without revisiting a cell;
    cells containing 1 are walls."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
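# Quick illustrative check: a fully open 2x2 grid has exactly two simple paths
# from the top-left to the bottom-right corner.
# >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
# 2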
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__A = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
__A = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
__A = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 173 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
A_ = "\nHuman: <<task>>\n\nAssistant: "
A_ = "huggingface-tools/default-prompts"
A_ = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def A_ ( snake_case , snake_case , snake_case="run" ):
if prompt_or_repo_id is None:
SCREAMING_SNAKE_CASE:str = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , snake_case ) is not None:
return prompt_or_repo_id
SCREAMING_SNAKE_CASE:str = cached_file(
snake_case , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(snake_case , "r" , encoding="utf-8" ) as f:
return f.read()
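# A minimal usage sketch (the agent name is an illustrative assumption; passing
# None falls back to the default prompts repo and requires network access):
# run_template = download_prompt(None, agent_name="my-agent", mode="run")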
| 143 |
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
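# Worked example: 25 is 0b11001 and 32 is 0b100000, so OR-ing the zero-padded
# bit strings yields 0b111001 (decimal 57):
# >>> binary_or(25, 32)
# '0b111001'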
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 700 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of the function given as a string in terms of ``x``,
    starting from the initial guess ``a``, via the Newton-Raphson update
    x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
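# Illustrative check: newton_raphson("x**2 - 2", 1.5) converges to roughly
# 1.4142135623, i.e. the square root of 2.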
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 199 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
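# A minimal usage sketch (fetches the hub checkpoint, so it needs network access):
# >>> tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
# >>> tokenizer("Hello world")["input_ids"]  # doctest: +SKIP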
| 325 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 325 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 700 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 353 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    r"""Constructs a Vivit image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # optionally offset the pixel values before applying the scale factor
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
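# A minimal usage sketch (the random "video" of 8 RGB frames is an assumed input):
# >>> import numpy as np
# >>> processor = VivitImageProcessor()
# >>> video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
# >>> processor(video, return_tensors="np")["pixel_values"].shape  # doctest: +SKIP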
| 9 |
'''simple docstring'''
def aliquot_sum(input_num: int) -> int:
    """Return the aliquot sum of ``input_num``: the sum of all natural numbers
    less than ``input_num`` that divide it evenly."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
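# Illustrative check: the proper divisors of 12 are 1, 2, 3, 4 and 6, so
# aliquot_sum(12) returns 16.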
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
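# Illustrative trace (an assumed TF key, not taken from a real checkpoint):
# rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
# returns "model.decoder.layers.0.self_attn.q_proj.weight".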
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 716 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Take a product name or category as input and return product information
    from Amazon, including the title, link, price, rating, MRP and discount."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[data_frame.index] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 408 | 0 |
from __future__ import annotations
class Matrix:
    """
    Matrix object generated from a 2D array where each element is an array
    representing a row.
    """

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )
def lowerCamelCase_ ( self : Union[str, Any] ):
return bool(self.determinant() )
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : int , __snake_case : int ):
UpperCAmelCase_ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_lowerCAmelCase ).determinant()
def lowerCamelCase_ ( self : int , __snake_case : int , __snake_case : int ):
if (row + column) % 2 == 0:
return self.get_minor(_lowerCAmelCase , _lowerCAmelCase )
return -1 * self.get_minor(_lowerCAmelCase , _lowerCAmelCase )
def lowerCamelCase_ ( self : Optional[int] ):
return Matrix(
[
[self.get_minor(_lowerCAmelCase , _lowerCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowerCamelCase_ ( self : Union[str, Any] ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_lowerCAmelCase )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[int] ):
return str(self.rows )
def __str__( self : Union[str, Any] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(_lowerCAmelCase ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : list[int] , __snake_case : int | None = None ):
UpperCAmelCase_ = TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise type_error
for value in row:
if not isinstance(_lowerCAmelCase , (int, float) ):
raise type_error
if len(_lowerCAmelCase ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(_lowerCAmelCase )
else:
UpperCAmelCase_ = self.rows[0:position] + [row] + self.rows[position:]
def lowerCamelCase_ ( self : str , __snake_case : list[int] , __snake_case : int | None = None ):
UpperCAmelCase_ = TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise type_error
for value in column:
if not isinstance(_lowerCAmelCase , (int, float) ):
raise type_error
if len(_lowerCAmelCase ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
UpperCAmelCase_ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase_ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Union[str, Any] , __snake_case : object ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : List[str] , __snake_case : object ):
return not self == other
def __neg__( self : Union[str, Any] ):
return self * -1
def __add__( self : Tuple , __snake_case : Matrix ):
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any , __snake_case : Matrix ):
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : str , __snake_case : Matrix | int | float ):
if isinstance(_lowerCAmelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(_lowerCAmelCase , _lowerCAmelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self : Optional[int] , __snake_case : int ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
UpperCAmelCase_ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowerCamelCase_ ( cls : List[Any] , __snake_case : list[int] , __snake_case : list[int] ):
return sum(row[i] * column[i] for i in range(len(_lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
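
# A small usage sketch (an addition, not part of the original module),
# exercising the Matrix class defined above.
def _matrix_demo() -> None:
    matrix = Matrix([[1, 2], [3, 4]])
    assert matrix.order == (2, 2) and matrix.is_square
    assert matrix.determinant() == -2  # ad - bc
    assert matrix + matrix == matrix * 2  # elementwise add vs. scalar multiply
    assert matrix * matrix.identity() == matrix  # I is a multiplicative identity


_matrix_demo()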
| 144 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply two integers with the binary (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply ``a`` and ``b`` modulo ``modulus``, reducing at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
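
# Quick sanity checks (an addition): both variants agree with the built-ins.
assert binary_multiply(3, 4) == 12
assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5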
| 283 | 0 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
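
# A quick check (an addition): the first Fibonacci number with three digits
# is F(12) = 144.
assert solution(3) == 12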
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 713 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
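
# A hedged usage sketch (an addition): in released `transformers` versions
# that ship this model, the config can be built and queried like
#     from transformers import XmodConfig
#     config = XmodConfig(num_hidden_layers=2)
#     assert config.model_type == "xmod"
# (kept as comments because this module uses package-relative imports).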
| 332 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
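
# A quick equivalence check (an addition): both generators agree on small inputs.
assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)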
def benchmark() -> None:
    """Benchmark both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 336 |
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 336 | 1 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
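
# A small sanity check (an addition): merging keeps the elements sorted.
assert str(merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2]))) == "1 -> 2 -> 3"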
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 590 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on ArgumentParser instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # YAML is a superset of JSON, so the YAML loader can read the JSON file
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 590 | 1 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place queens row by row, backtracking on any collision."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
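
# A quick check (an addition): the 4x4 board has exactly two solutions.
_boards: list[list[str]] = []
depth_first_search([], [], [], _boards, 4)
assert len(_boards) == 2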
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 202 |
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """Holds a DeepSpeed configuration dict and answers quick queries about it (zero stage, offload, ...)."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine, used to follow the conventional training loop."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a deepspeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a deepspeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer used when the optimizer is configured in the deepspeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler used when the scheduler is configured in the deepspeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
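
# A hedged usage sketch (an addition): the config wrapper accepts a plain
# dict and exposes dotted-path queries, e.g. (run from within the package)
#     ds = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#     assert ds.is_zero3() and ds.get_value("zero_optimization.stage") == 3
# (kept as comments because this module uses package-relative imports).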
| 202 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 715 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583 | 0 |
def kth_permutation(k: int, n: int) -> list[int]:
    """Find the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time."""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
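
# Quick checks (an addition): permutations of (0, 1, 2) in lexicographic order.
assert kth_permutation(0, 3) == [0, 1, 2]
assert kth_permutation(5, 3) == [2, 1, 0]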
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 143 | 1 |
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D max pooling with a ``size`` x ``size`` window and the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
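
# A quick check (an addition): 2x2 max pooling with stride 2 over a 4x4 ramp.
assert (
    maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
    == np.array([[5, 7], [13, 15]])
).all()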
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling with a ``size`` x ``size`` window and the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
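
# And the average-pooling counterpart (an addition), truncated to ints as above.
assert (
    avgpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
    == np.array([[2, 4], [10, 12]])
).all()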
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 721 |
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
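
# Round-trip check (an addition): decoding an encoding returns the input.
assert decrypt(encrypt("SOS")) == "SOS"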
| 83 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __magic_name__ ( lowercase , lowercase , lowercase ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Dict = AutoConfig.from_pretrained(lowercase )
lowercase_ : Tuple = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase )
lowercase_ : Any = checkpoints.load_tax_checkpoint(lowercase )
lowercase_ : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
lowercase_ : List[str] = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase_ : Tuple = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ : List[Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
lowercase_ : Tuple = f"""layers_{str(lowercase )}"""
# Self-Attention
lowercase_ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
lowercase_ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
lowercase_ : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
lowercase_ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
lowercase_ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
lowercase_ : str = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
lowercase_ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
lowercase_ : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
lowercase_ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
lowercase_ : List[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
lowercase_ : List[Any] = flax_model.params["""encoder"""]["""block"""][str(lowercase )]["""layer"""]
lowercase_ : List[str] = tax_attention_key
lowercase_ : Optional[int] = tax_attention_out
lowercase_ : Union[str, Any] = tax_attention_query
lowercase_ : str = tax_attention_value
lowercase_ : Optional[int] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
lowercase_ : Tuple = tax_mlp_wi_a
lowercase_ : Union[str, Any] = tax_mlp_wi_a
else:
lowercase_ : Tuple = tax_mlp_wi
lowercase_ : List[str] = tax_mlp_wo
lowercase_ : List[str] = tax_mlp_layer_norm
lowercase_ : Dict = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
    lowercase_ : int = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
        lowercase_ : Any = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
    lowercase_ : Optional[int] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"""layers_{str(layer_index )}"""
        # Self-Attention
        tax_attention_key = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
        tax_attention_out = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
        tax_attention_query = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
        tax_attention_value = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
            """scale"""
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["""key"""]["""kernel"""]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["""out"""]["""kernel"""]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["""query"""]["""kernel"""]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["""value"""]["""kernel"""]
        # Layer Normalization
        tax_cross_layer_norm = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
            tax_mlp_wi_1 = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
        else:
            tax_mlp_wi = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
            tax_mlp_wo = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
        # Assigning
        flax_model_decoder_layer_block = flax_model.params["""decoder"""]["""block"""][str(layer_index )]["""layer"""]
lowercase_ : Dict = tax_attention_key
lowercase_ : List[str] = tax_attention_out
lowercase_ : Tuple = tax_attention_query
lowercase_ : Dict = tax_attention_value
lowercase_ : Optional[int] = tax_pre_attention_layer_norm
lowercase_ : Dict = tax_enc_dec_attention_key
lowercase_ : Dict = tax_enc_dec_attention_out
lowercase_ : Any = tax_enc_dec_attention_query
lowercase_ : str = tax_enc_dec_attention_value
lowercase_ : Any = tax_cross_layer_norm
if split_mlp_wi:
            lowercase_ : Union[str, Any] = tax_mlp_wi_0
            lowercase_ : Union[str, Any] = tax_mlp_wi_1
else:
lowercase_ : Tuple = tax_mlp_wi
lowercase_ : Optional[int] = tax_mlp_wo
        lowercase_ : Any = tax_mlp_layer_norm
lowercase_ : Dict = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    lowercase_ : List[Any] = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
    lowercase_ : List[Any] = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    lowercase_ : Tuple = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase_ : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(lowercase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
UpperCAmelCase_ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path) | 458 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __magic_name__ ( ) -> str:
"""simple docstring"""
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def __magic_name__ ( ) -> List[Any]:
"""simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
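# Example invocation (assumed trainer script and flags, for illustration only):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train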
if __name__ == "__main__":
main() | 458 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : TreeNode | None ) -> bool:
    """simple docstring"""

    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(UpperCamelCase ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
return is_binary_search_tree_recursive_check(UpperCamelCase , -float("""inf""" ) , float("""inf""" ) )
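# Usage sketch (uses the TreeNode dataclass and the validator above):
#   root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   __SCREAMING_SNAKE_CASE(root)  # -> True, since 1.0 < 2.0 < 3.0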
if __name__ == "__main__":
import doctest
doctest.testmod() | 705 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_A = logging.getLogger()
def get_setup_file() -> str:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
def get_results(UpperCamelCase : str ) -> dict:
    """simple docstring"""
    results = {}
    path = os.path.join(UpperCamelCase , """all_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
return results
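# Illustrative shape of an all_results.json written by the no_trainer example scripts
# (keys vary per task; the values here are made up):
#   {"eval_accuracy": 0.78, "train_loss": 0.41, "perplexity": 36.2}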
def is_cuda_and_apex_available() -> bool:
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == """cuda"""
    return is_using_cuda and is_apex_available()
_A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( TestCasePlus ):
    @classmethod
    def setUpClass( cls ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]

    @classmethod
    def tearDownClass( cls ):
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_glue_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_clm_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["""perplexity"""] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_mlm_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["""perplexity"""] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_ner_no_trainer( self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
        self.assertLess(result["""train_loss"""] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_squad_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["""eval_f1"""] , 28 )
        self.assertGreaterEqual(result["""eval_exact"""] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_swag_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_summarization_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
        self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
        self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
        self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_translation_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """translation_no_trainer""" ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer( self ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_image_classification_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """step_1""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """image_classification_no_trainer""" ) ) ) | 403 | 0 |
"""simple docstring"""
def solution(n : int = 1000 ):
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
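# Worked example: for n = 12 the only Pythagorean triplet is (3, 4, 5), since
# 3 + 4 + 5 == 12 and 3**2 + 4**2 == 5**2, so solution(12) returns 3 * 4 * 5 = 60.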
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url : str ):
    config = Swin2SRConfig()

    # Attribute names below follow Swin2SRConfig's fields; the original targets were lost in extraction.
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''

    return config
def rename_key(name : str , config : Swin2SRConfig ):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2' , 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name

    return name
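# Example mapping traced through the rules above:
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#   -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"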
def convert_state_dict(orig_state_dict : dict , config : Swin2SRConfig ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim

            # Destination keys reconstructed from the rename conventions in rename_key above.
            if "weight" in key:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val

    return orig_state_dict
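# Note: fused "qkv" matrices are split into equal thirds above -- rows [0:dim) hold the query
# projection, rows [dim:2*dim) the key projection, and the last dim rows the value projection.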
def convert_swinasr_checkpoint(checkpoint_url : str , pytorch_dump_folder_path : str , push_to_hub : bool ):
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys ,unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
    pixel_values = transforms(image ).unsqueeze(0 )

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )

    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
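# Example CLI usage (hypothetical script name and output path; the checkpoint URL is the
# script's default):
#   python convert_swin2sr_checkpoint.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64 --push_to_hub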
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path , config_path , pytorch_dump_path ):
    """simple docstring"""

    def get_masked_lm_array(name ):
        full_name = F'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )

    def get_encoder_array(name ):
        full_name = F'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )

    def get_encoder_layer_array(layer_index , name ):
        full_name = F'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )

    def get_encoder_attention_layer_array(layer_index , name , orig_shape ):
        full_name = F'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(orig_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
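    # Note: TF stores dense kernels as (in_features, out_features); the transposes in the
    # helpers above convert them to the (out_features, in_features) layout torch.nn.Linear expects.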
    print(F'Loading model based on config from {config_path}...' )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , "_query_dense/kernel" , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , "_query_dense/bias" , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , "_key_dense/kernel" , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , "_key_dense/bias" , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , "_value_dense/kernel" , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , "_value_dense/bias" , self_attn.value.bias.data.shape )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , "_output_dense/kernel" , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , "_output_dense/bias" , self_output.dense.bias.data.shape )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , "_attention_layer_norm/gamma" )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , "_attention_layer_norm/beta" )

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , "_intermediate_dense/kernel" )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , "_intermediate_dense/bias" )

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , "_output_dense/kernel" )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , "_output_dense/bias" )

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , "_output_layer_norm/gamma" )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , "_output_layer_norm/beta" )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings" )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings" )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma" )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta" )

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel" )
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias" )

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma" )
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta" )

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table" )

    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel" )
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias" )

    # Export final model
    model.save_pretrained(pytorch_dump_path )

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )

    print("Model conversion was done successfully!" )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
snake_case : Dict = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 182 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
    'facebook/data2vec-text-base': 'https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json',
}
class Data2VecTextConfig( PretrainedConfig ):
    model_type = 'data2vec-text'
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
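# Example (defaults mirror facebook/data2vec-text-base):
#   config = Data2VecTextConfig(vocab_size=30_522, hidden_size=768)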
class Data2VecTextOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
A__ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
]) | 182 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend( self ):
        '''simple docstring'''
        simple_backend = find_backend(''' if not is_torch_available():''' )
        self.assertEqual(simple_backend, '''torch''' )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend, '''torch_and_transformers''' )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend, '''torch_and_transformers_and_onnx''' )
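        # Multiple backends are joined with "_and_" in the order their checks appear, as asserted above.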
    def test_read_init( self ):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''', objects )
        self.assertIn('''torch_and_transformers''', objects )
        self.assertIn('''flax_and_transformers''', objects )
        self.assertIn('''torch_and_transformers_and_onnx''', objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''', objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''', objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''', objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''', objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''', objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''', objects['''torch_and_transformers_and_onnx'''] )
    def test_create_dummy_object( self ):
        '''simple docstring'''
        dummy_constant = create_dummy_object('''CONSTANT''', '''\'torch\'''' )
        self.assertEqual(dummy_constant, '''\nCONSTANT = None\n''' )

        dummy_function = create_dummy_object('''function''', '''\'torch\'''' )
        self.assertEqual(
            dummy_function, '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''', '''\'torch\'''' )
        self.assertEqual(dummy_class, expected_dummy_class )
    def test_create_dummy_files( self ):
        '''simple docstring'''
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''], expected_dummy_pytorch_file )
| 183 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
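    # The encoder_hidden_states / encoder_attention_mask returned above feed the cross-attention
    # path that is enabled once config.is_decoder is True.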
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape ), expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
| 183 | 1 |
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
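# `__all__` above restricts `from ... import *` to exactly the helpers re-exported here.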
| 702 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 5.0,
            '''output_type''': '''numpy''',
            '''strength''': 0.75,
        }
        return inputs
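    # In img2img, `strength` (0.75 above) sets the fraction of denoising steps applied to the
    # noised input image; higher values preserve less of the source image.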
    def test_stable_diffusion_xl_img2img_euler( self ):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def test_save_load_optional_components( self ):
        '''simple docstring'''
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_base( self ):
        '''simple docstring'''
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 622 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class __UpperCamelCase :
    def __init__( self , keywords ):
        '''simple docstring'''
        self.adlist = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )

        for keyword in keywords:
            self.add_keyword(keyword )
self.set_fail_transitions()
    def find_next_state( self , current_state , char ):
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
    def add_keyword( self , keyword ):
        '''simple docstring'''
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        '''simple docstring'''
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state , self.adlist[child]['value'] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]['value'] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )
    def search_in( self , string ):
        '''simple docstring'''
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
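# Usage sketch (class name kept as in this file):
#   automaton = __UpperCamelCase(['he', 'she'])
#   automaton.search_in('she')  # -> {'she': [0], 'he': [1]}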
if __name__ == "__main__":
import doctest
doctest.testmod()
| 476 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
UpperCAmelCase = CLIPTextModel(snake_case__ )
UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase_ ( self , snake_case__ , snake_case__=0 ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith("""mps""" ):
UpperCAmelCase = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**snake_case__ )
UpperCAmelCase = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase = """np"""
UpperCAmelCase = sd_pipe(**snake_case__ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=snake_case__ )
UpperCAmelCase = video.to("""cuda""" )
UpperCAmelCase = """Spiderman is surfing"""
UpperCAmelCase = pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type="""pt""" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
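# To run just the fast tests above from a diffusers checkout (the test-file path
# below is an assumption about the repository layout):
#   pytest tests/pipelines/text_to_video_synthesis/test_video_to_video.py -k "FastTests"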
| 673 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
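# Why the `% size_divisor == 0` assertions above hold: GLPN's processor rounds
# each spatial dimension *down* to a multiple of `size_divisor`, e.g. with the
# tester default of 32, a 400x347 input becomes 384x320.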
| 703 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
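        # e.g. with the defaults above: image_size=30, patch_size=2 gives
        # (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227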

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher, which has no labels argument
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 674 | 0 |
"""simple docstring"""
def A_ (__a , __a ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
A_ = str(bin(__a ) )
binary_number += "0" * shift_amount
return binary_number
def A_ (__a , __a ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
A_ = str(bin(__a ) )[2:]
if shift_amount >= len(__a ):
return "0b0"
A_ = binary_number[: len(__a ) - shift_amount]
return "0b" + shifted_binary_number
def A_ (__a , __a ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
A_ = "0" + str(bin(__a ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
A_ = len(bin(__a )[3:] ) # Find 2's complement of number
A_ = bin(abs(__a ) - (1 << binary_number_length) )[3:]
A_ = (
"1" + "0" * (binary_number_length - len(__a )) + binary_number
)
if shift_amount >= len(__a ):
return "0b" + binary_number[0] * len(__a )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__a ) - shift_amount]
)
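# Cross-check against Python's built-in shifts (worked by hand):
#   logical_left_shift(17, 2)      == '0b1000100', and 17 << 2 == 68 == 0b1000100
#   arithmetic_right_shift(-17, 2) == '0b111011', the 6-bit two's-complement
#   pattern of -5, matching Python's floor-dividing (-17) >> 2 == -5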
if __name__ == "__main__":
import doctest
doctest.testmod()
| 115 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
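# For reference: with the regression setup above (64 training samples) and the
# Trainer defaults assumed here (per-device batch size 8, 3 epochs, logging /
# saving / evaluation intervals larger than the run), `get_expected_events`
# reduces to:
#   on_init_end, on_train_begin,
#   3 x [on_epoch_begin, 8 x (on_step_begin, on_step_end), on_epoch_end],
#   on_log, on_train_end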
| 115 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    r"""
    A standard ImageNet-style image processor: resize, center crop, rescale and normalize.
    (The original class name was lost to identifier mangling; a generic name is used here.)
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
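# Net effect of the defaults above: the classic ImageNet evaluation transform
# (resize so the short side is 256, center-crop 224x224, scale to [0, 1],
# normalize). A single RGB image therefore yields a `pixel_values` batch of
# shape (1, 3, 224, 224) when `return_tensors="pt"` is passed.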
| 707 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
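# Shape-flow note (NHWC, as used by Flax): each down block returns its running
# `hidden_states` plus a tuple of per-layer outputs; the UNet later feeds those
# back to the up blocks as `res_hidden_states_tuple`, which is popped from the
# end and concatenated on the channel axis (axis=-1), which is why the up-block
# resnets are sized `resnet_in_channels + res_skip_channels`.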
| 410 | 0 |
"""
Given an n x m grid whose rows and columns are all sorted in decreasing order,
count the number of negative numbers in it.
https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix
"""


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """
    Validate that the rows and columns of the grid are sorted in decreasing order.

    >>> for grid in test_grids:
    ...     validate_grid(grid)
    """
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """
    Find the index of the first negative number using binary search.

    >>> find_negative_index([4, 3, 2, -1])
    3
    >>> find_negative_index([-1, -2, -3])
    0
    >>> find_negative_index([1, 0])
    2
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """
    An O(n log(m)) solution that uses binary search to find the boundary between
    positive and negative numbers in each row.

    >>> [count_negatives_binary_search(grid) for grid in test_grids]
    [8, 0, 0, 3, 1498500]
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """
    An O(n * m) solution that checks every element.

    >>> [count_negatives_brute_force(grid) for grid in test_grids]
    [8, 0, 0, 3, 1498500]
    """
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """
    Similar to the brute-force solution above, but uses break to reduce the
    number of iterations once the first negative in a row is found.

    >>> [count_negatives_brute_force_with_break(grid) for grid in test_grids]
    [8, 0, 0, 3, 1498500]
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 448 |
"""
Project Euler Problem 81: https://projecteuler.net/problem=81
Find the minimal path sum from the top left to the bottom right of the matrix
in matrix.txt, moving only right and down.
"""
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal right/down path sum of the matrix read from `filename`.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
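# Worked example: for the 2x2 grid [[1, 2], [3, 4]] the dp table fills as
# [[1, 3], [4, 7]], so the minimal right/down path sum is 7 (1 -> 2 -> 4).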
if __name__ == "__main__":
print(f"""{solution() = }""") | 448 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE__:Union[str, Any] = parse(importlib.metadata.version("""torch"""))
def _lowerCamelCase( a , a , a ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
__a = STR_OPERATION_TO_FUNC[operation]
if isinstance(a , a ):
__a = parse(importlib.metadata.version(a ) )
return operation(a , parse(a ) )
def _lowerCamelCase( a , a ):
return compare_versions(a , a , a )
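# Typical call sites (illustrative only):
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # safe to use a feature introduced in torch 1.12
#     compare_versions("numpy", ">=", "1.20.0")
#
# STR_OPERATION_TO_FUNC maps operator strings such as ">=" to functions from the
# `operator` module, so the comparison itself is delegated to `packaging`'s
# Version ordering.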
| 67 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        DetaImageProcessor, assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sd_pipe.encode_prompt(A, negative_prompt=A ) # (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
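# Minimal end-to-end sketch of the pipeline under test, assuming diffusers'
# public `StableDiffusionXLImg2ImgPipeline` (which `StableDiffusionXLImgaImgPipeline`
# above appears to alias), a CUDA device, and a hypothetical "input.png".
if __name__ == "__main__":
    import torch
    from PIL import Image
    from diffusers import StableDiffusionXLImg2ImgPipeline

    pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
    ).to("cuda")
    init_image = Image.open("input.png").convert("RGB").resize((1024, 1024))
    # strength=0.75 mirrors the dummy inputs above; higher strength lets the
    # prompt override more of the input image.
    out = pipe(prompt="A painting of a squirrel eating a burger",
               image=init_image, strength=0.75, num_inference_steps=30)
    out.images[0].save("output.png")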
| 28 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
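# Quick interactive sketch of the fast tokenizer tested above; assumes Hub
# access to "allenai/longformer-base-4096".
if __name__ == "__main__":
    from transformers import LongformerTokenizerFast

    tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
    enc = tok("A, <mask> AllenNLP sentence.", return_offsets_mapping=True)
    # The fast (Rust) tokenizer keeps the space in front of <mask>, which is
    # exactly the behaviour compared against the Python tokenizer above.
    print(enc["input_ids"])
    print(enc["offset_mapping"])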
| 28 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = CTRLTokenizer
__UpperCAmelCase : Any = False
__UpperCAmelCase : str = False
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : Optional[int] = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
snake_case : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : Dict = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
snake_case : Dict = {"unk_token": "<unk>"}
snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
snake_case : List[str] = "adapt react readapt apt"
snake_case : int = "adapt react readapt apt"
return input_text, output_text
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case : str = "adapt react readapt apt"
snake_case : Dict = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
snake_case : List[Any] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[int] = tokens + [tokenizer.unk_token]
snake_case : str = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
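# Standalone sketch of the same BPE round trip without the unittest harness;
# the vocab and merges mirror the fixtures written in setUp above.
if __name__ == "__main__":
    import json
    import os
    import tempfile

    from transformers import CTRLTokenizer

    tmpdir = tempfile.mkdtemp()
    vocab = {tok: i for i, tok in enumerate(["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"])}
    merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
    with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as f:
        json.dump(vocab, f)
    with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as f:
        f.write("\n".join(merges))
    tokenizer = CTRLTokenizer(
        os.path.join(tmpdir, "vocab.json"), os.path.join(tmpdir, "merges.txt"), unk_token="<unk>"
    )
    print(tokenizer.tokenize("adapt react readapt apt"))
    # expected: ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']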
| 117 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Union[str, Any] = '''xmod'''
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=("en_XX",) , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
snake_case : Optional[int] = vocab_size
snake_case : Optional[Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Dict = hidden_act
snake_case : Union[str, Any] = intermediate_size
snake_case : str = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : Union[str, Any] = type_vocab_size
snake_case : Union[str, Any] = initializer_range
snake_case : str = layer_norm_eps
snake_case : Dict = position_embedding_type
snake_case : Any = use_cache
snake_case : str = classifier_dropout
snake_case : Any = pre_norm
snake_case : Union[str, Any] = adapter_reduction_factor
snake_case : Optional[int] = adapter_layer_norm
snake_case : int = adapter_reuse_layer_norm
snake_case : List[Any] = ln_before_adapter
snake_case : Tuple = list(UpperCamelCase__ )
snake_case : Tuple = default_language
class _lowerCAmelCase ( snake_case_ ):
@property
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
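# Hands-on sketch of the configuration above, assuming it mirrors the public
# `transformers.XmodConfig`; the adapter_* fields are X-MOD's additions on
# top of an otherwise RoBERTa-style configuration.
if __name__ == "__main__":
    from transformers import XmodConfig

    config = XmodConfig(
        languages=("en_XX", "de_DE"),   # one adapter per language
        adapter_reduction_factor=2,     # adapter bottleneck = hidden_size / 2
        default_language="en_XX",
    )
    print(config.languages, config.default_language)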
| 117 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
snake_case : List[Any] = None
snake_case : str = logging.get_logger(__name__)
snake_case : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
snake_case : Dict = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
snake_case : Optional[int] = {
"camembert-base": 512,
}
snake_case : List[Any] = "▁"
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
UpperCamelCase__ = CamembertTokenizer
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=["<s>NOTUSED", "</s>NOTUSED"] , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
__magic_name__ : str = vocab_file
__magic_name__ : Any = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : int = [self.cls_token_id]
__magic_name__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Union[str, Any] = [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Dict = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
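# Sketch of the special-token layout produced by build_inputs_with_special_tokens
# above: <s> A </s></s> B </s>. Assumes Hub access to "camembert-base".
if __name__ == "__main__":
    from transformers import CamembertTokenizerFast

    tok = CamembertTokenizerFast.from_pretrained("camembert-base")
    ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
    # -> [cls_id, 10, 11, sep_id, sep_id, 20, 21, sep_id]
    print(ids)
    print(tok.create_token_type_ids_from_sequences([10, 11], [20, 21]))  # all zeros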
| 124 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class _snake_case :
def __init__( self , _a , _a=13 , _a=7 , _a=False , _a=True , _a=False , _a=True , _a=33 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__magic_name__ : Dict = parent
__magic_name__ : List[str] = batch_size
__magic_name__ : Tuple = seq_length
__magic_name__ : int = is_training
__magic_name__ : Union[str, Any] = use_input_mask
__magic_name__ : str = use_token_type_ids
__magic_name__ : Dict = use_labels
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Tuple = hidden_size
__magic_name__ : Union[str, Any] = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : str = intermediate_size
__magic_name__ : Tuple = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : int = attention_probs_dropout_prob
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : Optional[int] = type_vocab_size
__magic_name__ : Optional[int] = type_sequence_label_size
__magic_name__ : Dict = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Optional[Any] = scope
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Union[str, Any] = None
if self.use_input_mask:
__magic_name__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
__magic_name__ : List[Any] = None
__magic_name__ : List[str] = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : Dict = EsmModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : str = model(_a , attention_mask=_a )
__magic_name__ : List[str] = model(_a )
__magic_name__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : int = EsmForMaskedLM(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : int = self.num_labels
__magic_name__ : int = EsmForTokenClassification(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Tuple = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs # (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)
__magic_name__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = False
UpperCamelCase__ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = ()
UpperCamelCase__ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = EsmModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ : str = type
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[Any] = EsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()[0]
__magic_name__ : List[str] = EsmEmbeddings(config=_a )
__magic_name__ : Dict = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__magic_name__ : Tuple = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__magic_name__ : Dict = create_position_ids_from_input_ids(_a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_a , _a ) ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()[0]
__magic_name__ : str = EsmEmbeddings(config=_a )
__magic_name__ : Optional[Any] = torch.empty(2 , 4 , 30 )
__magic_name__ : str = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__magic_name__ : Tuple = torch.as_tensor([expected_single_positions, expected_single_positions] )
__magic_name__ : List[str] = embeddings.create_position_ids_from_inputs_embeds(_a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_a , _a ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@require_torch
class _snake_case ( snake_case ):
@slow
def SCREAMING_SNAKE_CASE ( self ):
with torch.no_grad():
__magic_name__ : Dict = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__magic_name__ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Dict = model(_a )[0]
__magic_name__ : Optional[Any] = 33
__magic_name__ : Optional[int] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , _a )
__magic_name__ : List[Any] = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
with torch.no_grad():
__magic_name__ : Optional[int] = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__magic_name__ : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__magic_name__ : Tuple = model(_a )[0]
# compare the actual values for a slice.
__magic_name__ : Optional[Any] = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
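# Standalone sketch of the masked-LM checkpoint exercised by the slow tests
# above, reusing the same input ids; expects logits of shape (1, 6, 33),
# matching the vocab size asserted earlier. Assumes Hub access.
if __name__ == "__main__":
    import torch
    from transformers import EsmForMaskedLM

    model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D").eval()
    input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
    with torch.no_grad():
        logits = model(input_ids).logits
    print(logits.shape)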
| 124 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
A : Optional[int] = logging.getLogger()
A : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase( lowercase__ ):
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple ) -> Any:
'''simple docstring'''
os.makedirs(__lowercase , exist_ok=__lowercase )
__snake_case = {"source": "What is love ?", "target": "life"}
__snake_case = {"train": 1_2, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__snake_case = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowercase , f'''{split}.{field}''' ) , "w" ) as f:
f.write(__lowercase )
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str = "pytorch" ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.get_auto_remove_tmp_dir()
__snake_case = os.path.join(__lowercase , "output" )
__snake_case = os.path.join(__lowercase , "data" )
self._create_dummy_data(data_dir=__lowercase )
__snake_case = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
__snake_case = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowercase , env=self.get_env() )
__snake_case = os.path.join(__lowercase , "metrics.json" )
with open(__lowercase ) as f:
__snake_case = json.load(__lowercase )
return result
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
__snake_case = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> int:
'''simple docstring'''
__snake_case = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 715 |
class Graph:
    def __init__( self ):
        '''Adjacency-map representation of an undirected, weighted graph.'''
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        '''Add a vertex to the graph if it is not already present.'''
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        '''Add an undirected, weighted edge between head and tail.'''
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        '''Make all edge weights distinct by bumping duplicates upward.'''
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )  # drop the reversed duplicate
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        '''Render every stored edge as "head -> tail == weight".'''
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )
    def get_edges( self ):
        '''Return all edges as (tail, head, weight) tuples.'''
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        '''Return a view of all vertices in the graph.'''
        return self.adjacency.keys()

    @staticmethod
    def build( vertices=None , edges=None ):
        '''Build a graph from iterables of vertices and (head, tail, weight) edges.'''
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind:
    '''Disjoint-set (union-find) structure with path compression and union by rank.'''

    def __init__( self ):
        self.parent = {}
        self.rank = {}

    def __len__( self ):
        return len(self.parent )
    def make_set( self , item ):
        '''Create a singleton set for item (or return its existing root).'''
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find( self , item ):
        '''Find the set root of item, compressing the path along the way.'''
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union( self , item_a , item_b ):
        '''Merge the sets containing item_a and item_b; return the new root.'''
        root_a = self.find(item_a )
        root_b = self.find(item_b )
        if root_a == root_b:
            return root_a
        if self.rank[root_a] > self.rank[root_b]:
            self.parent[root_b] = root_a
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            self.parent[root_a] = root_b
            return root_b
        # equal ranks: pick root_a, whose rank then grows by one
        self.rank[root_a] += 1
        self.parent[root_b] = root_a
        return root_a
    @staticmethod
    def boruvka_mst( graph ):
        '''Compute a minimum spanning tree of `graph` with Borůvka's algorithm.'''
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            # cheapest outgoing edge per component in this round
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )  # drop the reversed duplicate
            for edge in edges:
                head , tail , weight = edge
                set_a = union_find.find(head )
                set_b = union_find.find(tail )
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
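# Quick usage sketch of the classes above: build a small weighted graph and
# extract its minimum spanning tree. Note that boruvka_mst is defined as a
# @staticmethod on UnionFind in this file, so it is called through that class.
if __name__ == "__main__":
    g = Graph.build(edges=[(1, 2, 1), (1, 3, 4), (2, 3, 2), (2, 4, 5), (3, 4, 3)])
    mst = UnionFind.boruvka_mst(g)
    print(mst)  # each MST edge printed in both directions as "head -> tail == weight"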
| 473 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image ):
    '''Deprecated helper: convert PIL image(s)/arrays to a [-1, 1] float tensor.'''
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead" , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask ):
    '''Deprecated helper: convert PIL mask(s)/arrays to a {0, 1} float tensor.'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class __UpperCAmelCase( UpperCamelCase__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__ , snake_case__ = 250 , snake_case__ = 0.0 , snake_case__ = 10 , snake_case__ = 10 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : Optional[int]= image
lowercase__ : Tuple= _preprocess_image(__lowerCamelCase )
lowercase__ : Tuple= original_image.to(device=self.device , dtype=self.unet.dtype )
lowercase__ : Dict= _preprocess_mask(__lowerCamelCase )
lowercase__ : List[Any]= mask_image.to(device=self.device , dtype=self.unet.dtype )
lowercase__ : Tuple= original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase__ : str= original_image.shape
lowercase__ : str= randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.device )
lowercase__ : List[Any]= eta
lowercase__ : List[str]= self.scheduler.timesteps[0] + 1
lowercase__ : List[str]= generator[0] if isinstance(__lowerCamelCase , __lowerCamelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
lowercase__ : Dict= self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# compute previous image: x_t -> x_t-1
lowercase__ : List[Any]= self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
lowercase__ : List[str]= self.scheduler.undo_step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase__ : Union[str, Any]= t
lowercase__ : Dict= (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Union[str, Any]= image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Tuple= self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
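# End-to-end sketch of RePaint inpainting with this pipeline, assuming the
# public diffusers classes `RePaintPipeline`/`RePaintScheduler` (which this
# file appears to define/use) and hypothetical local "face.png"/"mask.png".
if __name__ == "__main__":
    import torch
    from PIL import Image
    from diffusers import RePaintPipeline, RePaintScheduler

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained(
        "google/ddpm-ema-celebahq-256", scheduler=scheduler
    ).to("cuda")
    original = Image.open("face.png").convert("RGB").resize((256, 256))
    mask = Image.open("mask.png").convert("L").resize((256, 256))  # thresholded to {0, 1} by _preprocess_mask
    out = pipe(
        image=original,
        mask_image=mask,
        num_inference_steps=250,   # defaults mirrored from __call__ above
        jump_length=10,
        jump_n_sample=10,
        generator=torch.Generator("cuda").manual_seed(0),
    )
    out.images[0].save("inpainted.png")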
| 218 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = None
UpperCamelCase__ = None
@property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCamelCase , "feature_size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "sampling_rate" ) )
self.assertTrue(hasattr(__lowerCamelCase , "padding_value" ) )

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 380 | 0 |
"""Convert BigBird checkpoint."""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()

def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
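

# Illustrative invocation sketch (the script and file names below are
# hypothetical examples, not mandated by the code above):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird/model.ckpt \
#       --big_bird_config_file bigbird/config.json \
#       --pytorch_dump_path ./bigbird_pytorch \
#       --is_trivia_qa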
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 713 |
import fire
from utils import calculate_rouge, save_json

def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
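

# Illustrative usage via the `fire` CLI registered below (the script and file
# names are hypothetical):
#   python rouge_cli.py predictions.txt targets.txt --save_path=rouge_metrics.json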
if __name__ == "__main__":
fire.Fire(calculate_rouge_path) | 402 | 0 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        # mean per-token cross-entropy, rescaled below to a summed log-likelihood
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 633 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)

class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 633 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for the base model; it also helps speed up the tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 716 |
from __future__ import annotations

def peak(lst: list[int]) -> int:
    """
    Return the peak value of a list that increases and then decreases,
    using a divide-and-conquer search on the middle three elements.
    """
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing, recurse on left
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
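

# Illustrative example for a unimodal input:
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
# (m = 4, the middle three elements are [4, 5, 4], so 5 is returned directly)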
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 175 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
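
# Sanity check of the closed form above, S_n = n/2 * (2a + (n - 1)d):
# sum_of_series(1, 1, 10) = 10/2 * (2*1 + 9*1) = 55.0, as the doctest verifies.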


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 600 | 0 |
"""
Champernowne's constant (Project Euler problem 40): concatenate the positive
integers into an irrational decimal fraction and multiply together the digits
d_1, d_10, d_100, ..., d_1000000.
"""


def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
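
# Note the zero-based indexing above: constant[9] is the 10th digit d_10,
# constant[99] the 100th digit d_100, and so on up to d_1000000.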
if __name__ == "__main__":
print(solution())
| 716 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
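

# Note: each line of the predictions/gold files is expected to hold
# tab-separated provenance strings (page titles), so precision@k here is the
# fraction of the top-k retrieved titles that also appear on the gold line.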


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
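
# Illustrative end-to-end invocation (the model name and file paths are
# examples only, not mandated by the script):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set test.source \
#       --gold_data_path gold_data \
#       --predictions_path e2e_preds.txt \
#       --eval_mode e2e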
| 136 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

RESOURCE_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

VOCAB_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer, based on SentencePiece."""

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid-character replacement via SP_CHAR_MAPPING."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string, splitting pieces on CJK chars, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 641 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets["test"]
if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
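# A typical invocation of the TabFact fine-tuning script above (a sketch only: the
# exact flag set depends on the ModelArguments/DataTrainingArguments defined earlier
# in the script, and the checkpoint name is an assumption):
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --max_seq_length 1024 \
#       --output_dir ./tapex-tabfact \
#       --overwrite_output_dir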
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
__snake_case = """▁"""
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False,
                 bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
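# A minimal usage sketch for the tokenizer above (the model path and sentence are
# placeholders; a real SentencePiece vocabulary file is required):
#
#   tokenizer = AlbertTokenizer(vocab_file="spiece.model")
#   print(tokenizer.tokenize("this is a test"))   # e.g. ['▁this', '▁is', '▁a', '▁test']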
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
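# What the _LazyModule registration above buys the caller (illustrative, not part of
# this file): heavy, backend-specific submodules are imported only on first access.
#
#   from transformers.models import deit
#   config_cls = deit.DeiTConfig   # resolving the attribute triggers the real import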
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
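# A small illustration (not part of the original script) of the substitution that
# update_version_in_file performs with the "init" pattern defined above:
#
#   >>> REPLACE_PATTERNS["init"][0].sub('__version__ = "4.31.0"\n', '__version__ = "4.30.0.dev0"\n')
#   '__version__ = "4.31.0"\n'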
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field; a key of 0 means "fall back to the key given per call"
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
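# A quick runnable round-trip check for the class above (key chosen arbitrarily):
if __name__ == "__main__":
    crypt = XORCipher()
    assert crypt.decrypt_string(crypt.encrypt_string("hallo welt", 67), 67) == "hallo welt"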
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCamelCase__ = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training"""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
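# Why gather_for_metrics is used above instead of a plain gather (a summary, not
# part of the test module): with an uneven final batch, gather pads by repeating
# samples across processes, while gather_for_metrics drops that padding so each
# dataset sample contributes to the metric exactly once.
#
#   logits, targets = accelerator.gather_for_metrics((logits, targets))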
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
    eval_freq=100, igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
    recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
# Required parameters
parser.add_argument(
'''--data_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=snake_case_ , default=snake_case_ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=snake_case_ , default=snake_case_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=snake_case_ , type=snake_case_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=snake_case_ , default=snake_case_ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=snake_case_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=snake_case_ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=snake_case_ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=snake_case_ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=snake_case_ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=snake_case_ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=snake_case_ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=snake_case_ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=snake_case_ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=snake_case_ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=snake_case_ , type=snake_case_ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=snake_case_ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=snake_case_ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=snake_case_ , type=snake_case_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=snake_case_ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
# Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )
# load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
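# A toy restatement (not from the script above) of the filtering rule inside
# finetune(): a context is kept for backprop only while the secondary learner's
# predicted information gain clears a threshold that drops to -1 after the first
# 10 batches.
def keep_context(predicted_q: float, global_step: int, start_threshold: float = 1.0) -> bool:
    threshold = start_threshold if global_step < 10 else -1.0
    return predicted_q >= threshold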
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameters:
    # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
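# Example usage (San Francisco to Yosemite; the printed figure is approximate):
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")  # roughly 254 km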
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
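# For reference, the layouts the assertions above check (token ids elided):
#   single sequence: [CLS] text [SEP]
#   sequence pair:   [CLS] text [SEP] text_2 [SEP]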
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
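# Example usage (illustrative values): the nearest stored vector to [0, 0, 1]
# is [0, 0, 0], at euclidean distance 1.0.
if __name__ == "__main__":
    dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
    value_array = np.array([[0, 0, 1]])
    print(similarity_search(dataset, value_array))  # -> [[[0, 0, 0], 1.0]]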
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
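# Known result for Project Euler 55: with the default limit of 10_000,
# solution() evaluates to 249.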
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the row, the column and both upper diagonals for another queen
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
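# Sanity check (a classic result): for n = 8 the solver above finds all
# solutions, so the final print should report 92.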
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
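# Example (the datetime check inside zeller validates the result):
#   >>> zeller("01-31-2010")
#   'Your date 01-31-2010, is a Sunday!'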
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 561 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",),
                 block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel,
                out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6,
                downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups,
                attention_head_dim=output_channel, temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1,
            resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups, temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",),
                 block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel,
                out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block,
                resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups,
                attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
def __init__( self :Union[str, Any] , _lowercase :Optional[int] , _lowercase :Dict , _lowercase :str , _lowercase :str=None , _lowercase :int="random" , _lowercase :Tuple=False , _lowercase :Tuple=True) -> Any:
super().__init__()
UpperCAmelCase_ = n_e
UpperCAmelCase_ = vq_embed_dim
UpperCAmelCase_ = beta
UpperCAmelCase_ = legacy
UpperCAmelCase_ = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
UpperCAmelCase_ = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap)))
UpperCAmelCase_ = self.used.shape[0]
UpperCAmelCase_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase_ = self.re_embed
UpperCAmelCase_ = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices.")
else:
UpperCAmelCase_ = n_e
UpperCAmelCase_ = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
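    # The `z_q = z + (z_q - z).detach()` line above is the straight-through
    # estimator: the forward value equals the quantized z_q, while gradients
    # flow back to z as if the (non-differentiable) quantization step were the
    # identity map.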
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )
    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x
    def kl(self, other=None):
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)
    def mode(self):
return self.mean
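# Sketch of how the diagonal Gaussian posterior above is typically used in a
# VAE-style pipeline (illustrative only; the shape and the [mean | logvar]
# channel split are assumptions, not values from this file):
#
#   params = torch.randn(1, 8, 4, 4)                  # [mean | logvar] channels
#   posterior = DiagonalGaussianDistribution(params)
#   z = posterior.sample()                            # mean + std * eps (reparameterized)
#   kl = posterior.kl()                               # 0.5 * sum(mu^2 + var - 1 - logvar)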
| 561 | 1 |
from __future__ import annotations
def ceil_index(v, l, r, key) -> int:  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
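# Example for the O(n log n) longest-increasing-subsequence pair above
# (an added illustration, not part of the original module):
#
#   >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
#   6
#
# e.g. the increasing subsequence 2, 3, 7, 8, 10, 13.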
| 17 | '''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"input_ids": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 435 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = "mobilenet_v2"
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : List[str]=224 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1.0 , SCREAMING_SNAKE_CASE_ : List[Any]=8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : Tuple=6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Any="relu6" , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Any=0.8 , SCREAMING_SNAKE_CASE_ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0_0_1 , SCREAMING_SNAKE_CASE_ : int=255 , **SCREAMING_SNAKE_CASE_ : Any , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = depth_multiplier
lowerCamelCase__ = depth_divisible_by
lowerCamelCase__ = min_depth
lowerCamelCase__ = expand_ratio
lowerCamelCase__ = output_stride
lowerCamelCase__ = first_layer_is_expansion
lowerCamelCase__ = finegrained_output
lowerCamelCase__ = hidden_act
lowerCamelCase__ = tf_padding
lowerCamelCase__ = classifier_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = version.parse("1.11" )
@property
def __UpperCAmelCase ( self : Dict ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def __UpperCAmelCase ( self : int ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def __UpperCAmelCase ( self : List[str] ):
return 1e-4
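# Minimal usage sketch for the config and ONNX metadata defined above. The
# class names MobileNetV2Config / MobileNetV2OnnxConfig are assumed from
# context (this file defines them under obfuscated names), so this is an
# illustration rather than a verbatim API reference:
#
#   config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   print(onnx_config.inputs)   # OrderedDict([('pixel_values', {0: 'batch'})])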
| 258 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the fill-count first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F'{solution() = }')
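# The recurrence above is Project Euler-style block counting:
# fill_count_functions[n] counts the ways to fill a row of length n using
# blocks of length >= min_block_length separated by at least one empty cell
# (plus the all-empty row). solution() returns the first n at which this
# count exceeds one million.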
| 258 | 1 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self : Optional[Any] ) -> str:
return str(self )
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
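    # Sherman-Morrison, for reference: if A is invertible and
    # 1 + v^T A^(-1) u != 0, then
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # The method above applies this with `self` playing the role of A^(-1).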
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 493 |
def heaps(arr: list) -> list:
    """Return a list of all permutations of `arr`, using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
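# Example: heaps([1, 2, 3]) yields all 3! = 6 orderings, in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]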
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 493 | 1 |
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
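        # Worked example for the padding above, using this tester's defaults:
        # seq_length=7 and attention_window=4+2=6 give
        # encoder_seq_length = 7 + (6 - 7 % 6) % 6 = 12, the next multiple of 6.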
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_input_ids, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
                            head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 47 | 1 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    '''Create character n-grams out of the given sentence.'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
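# Example (added illustration): the function returns *character* n-grams, e.g.
#   create_ngram("I am", 2) -> ['I ', ' a', 'am']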
| 164 |
ROMAN = [
(1_000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
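# Example round-trip (added illustration): roman_to_int("XLII") == 42 and
# int_to_roman(42) == "XLII"; for canonical numerals the two functions are
# inverses of each other.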
| 164 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self ):
__lowerCamelCase = 0
def lowerCamelCase_ ( self ):
__lowerCamelCase = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Path(UpperCAmelCase ) / """preprocessor_config.json"""
__lowerCamelCase = Path(UpperCAmelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase , """w""" ) )
__lowerCamelCase = AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Path(UpperCAmelCase ) / """preprocessor_config.json"""
__lowerCamelCase = Path(UpperCAmelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase , """w""" ) )
__lowerCamelCase = AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""},
                open(processor_tmpfile, """w"""),
            )
            json.dump({"""model_type""": """clip"""}, open(config_tmpfile, """w"""))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("""image_processor_type""")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("""_processor_class""" not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)
def lowerCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Path(UpperCAmelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase , """w""" ) , )
__lowerCamelCase = AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self ):
with self.assertRaisesRegex(
UpperCAmelCase , """clip-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCamelCase_ ( self ):
with self.assertRaisesRegex(
UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoImageProcessor.from_pretrained(UpperCAmelCase , revision="""aaaaaa""" )
def lowerCamelCase_ ( self ):
with self.assertRaisesRegex(
UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase ):
__lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
__lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase )
__lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
__lowerCamelCase = AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCamelCase_ ( self ):
try:
AutoConfig.register("""custom""" , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Path(UpperCAmelCase ) / """preprocessor_config.json"""
__lowerCamelCase = Path(UpperCAmelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase , """w""" ) )
__lowerCamelCase = CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
__lowerCamelCase = AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self ):
        class NewImageProcessor(CustomImageProcessor):
            is_local = True
try:
AutoConfig.register("""custom""" , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(UpperCAmelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
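# The registration pattern exercised above, in minimal form (CustomConfig and
# CustomImageProcessor are the test helpers imported at the top of this file;
# `saved_dir` is an assumed placeholder for a directory produced by
# save_pretrained):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = AutoImageProcessor.from_pretrained(saved_dir)  # resolves to CustomImageProcessor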
| 571 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase__ ( _A: str , _A: str , _A: str , _A: PreTrainedTokenizer , _A: int , _A: Optional[int] = None , ):
'''simple docstring'''
__lowerCamelCase = {}
if train_file is not None:
__lowerCamelCase = [train_file]
if eval_file is not None:
__lowerCamelCase = [eval_file]
if test_file is not None:
__lowerCamelCase = [test_file]
__lowerCamelCase = datasets.load_dataset("""csv""" , data_files=_A )
__lowerCamelCase = list(ds[list(files.keys() )[0]].features.keys() )
__lowerCamelCase = features_name.pop(_A )
__lowerCamelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowerCamelCase = {label: i for i, label in enumerate(_A )}
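    # e.g. (hypothetical labels) a column containing ["neg", "pos"] yields {"neg": 0, "pos": 1}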
__lowerCamelCase = tokenizer.model_input_names
__lowerCamelCase = {}
if len(_A ) == 1:
for k in files.keys():
__lowerCamelCase = ds[k].map(
lambda _A : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_A , max_length=_A , padding="""max_length""" ) , batched=_A , )
elif len(_A ) == 2:
for k in files.keys():
__lowerCamelCase = ds[k].map(
lambda _A : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_A , max_length=_A , padding="""max_length""" , ) , batched=_A , )
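    # The generators below yield one (features, label) pair at a time so that
    # `tf.data.Dataset.from_generator` can wrap each split without materializing it in memory.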
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowerCamelCase = {k: v for k, v in ex.items() if k in input_names}
__lowerCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowerCamelCase = {k: v for k, v in ex.items() if k in input_names}
__lowerCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowerCamelCase = {k: v for k, v in ex.items() if k in input_names}
__lowerCamelCase = labelaid[ex[label_name]]
yield (d, label)
__lowerCamelCase = (
tf.data.Dataset.from_generator(
                _A , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowerCamelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowerCamelCase = (
tf.data.Dataset.from_generator(
                _A , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowerCamelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowerCamelCase = (
tf.data.Dataset.from_generator(
                _A , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowerCamelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_a : str = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
A = field(metadata={'''help''': '''Which column contains the label'''} )
A = field(default=__UpperCamelCase ,metadata={'''help''': '''The path of the training file'''} )
A = field(default=__UpperCamelCase ,metadata={'''help''': '''The path of the development file'''} )
A = field(default=__UpperCamelCase ,metadata={'''help''': '''The path of the test file'''} )
A = field(
default=128 ,metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} ,)
A = field(
default=__UpperCamelCase ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
A = field(
default=__UpperCamelCase ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
A = field(
default=__UpperCamelCase ,metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
A = field(default=__UpperCamelCase ,metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
A = field(
default=__UpperCamelCase ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,)
def UpperCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        f'''16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_A , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowerCamelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid ) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowerCamelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , )
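    # `compute_metrics` takes the argmax over the class logits of an EvalPrediction and
    # reports plain accuracy against the gold label ids.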
def compute_metrics(_A: EvalPrediction ) -> Dict:
__lowerCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowerCamelCase = TFTrainer(
model=_A , args=_A , train_dataset=_A , eval_dataset=_A , compute_metrics=_A , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCamelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCamelCase = trainer.evaluate()
__lowerCamelCase = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(_A , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(_A )
return results
if __name__ == "__main__":
main()
| 571 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and it builds on the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase = 1_6
lowercase = 3_2
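# The first constant is referenced below as MAX_GPU_BATCH_SIZE; in the upstream accelerate
# example the second one is EVAL_BATCH_SIZE.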
def __lowerCAmelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : DatasetDict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int = 1_6 ) -> List[str]:
lowerCamelCase_ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCamelCase_ = DatasetDict(
{
"""train""": dataset["""train"""].select(UpperCAmelCase__ ),
"""validation""": dataset["""train"""].select(UpperCAmelCase__ ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(UpperCAmelCase__ : str ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCAmelCase__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
UpperCAmelCase__ , padding="""longest""" , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["""test"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader, test_dataloader
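# Each call below receives one fold's train/validation index arrays from StratifiedKFold,
# while the original GLUE validation split is held out as a common test set for every fold.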
def __lowerCAmelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
# New Code #
lowerCamelCase_ = []
# Download the dataset
lowerCamelCase_ = load_dataset("""glue""" , """mrpc""" )
# Create our splits
lowerCamelCase_ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["""lr"""]
lowerCamelCase_ = int(config["""num_epochs"""] )
lowerCamelCase_ = int(config["""seed"""] )
lowerCamelCase_ = int(config["""batch_size"""] )
lowerCamelCase_ = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
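    # Example: with batch_size=64 and MAX_GPU_BATCH_SIZE=16 this yields 4 accumulation steps
    # of 16 samples each, keeping the effective batch size at 64.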
set_seed(UpperCAmelCase__ )
# New Code #
# Create our folds:
lowerCamelCase_ = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
lowerCamelCase_ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(UpperCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = get_fold_dataloaders(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=UpperCAmelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Now we train the model
for epoch in range(UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**UpperCAmelCase__ )
lowerCamelCase_ = outputs.loss
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
# New Code #
# We also run predictions on the test set at the very end
lowerCamelCase_ = []
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCAmelCase__ )
lowerCamelCase_ = outputs.logits
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(UpperCAmelCase__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCamelCase_ = torch.cat(UpperCAmelCase__ , dim=0 )
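    # Soft voting: stack the per-fold logits, average them (sum, then divide by num_folds),
    # and take the argmax of the averaged logits as the ensembled prediction.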
lowerCamelCase_ = torch.stack(UpperCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowerCamelCase_ = metric.compute(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )
accelerator.print("""Average test metrics from all folds:""" , UpperCAmelCase__ )
def __lowerCAmelCase ( ) -> int:
lowerCamelCase_ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=UpperCAmelCase__ , default=3 , help="""The number of splits to perform across the dataset""" )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 272 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __A:
def __init__( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : Any=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Union[str, Any]=9_9 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : int=2 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[int]=3_7 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Tuple=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[Any]=0.02 , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Any="None" , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Dict=None , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = relative_attention
lowerCamelCase_ = position_biased_input
lowerCamelCase_ = pos_att_type
lowerCamelCase_ = scope
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : List[str] ):
lowerCamelCase_ = TFDebertaVaModel(config=__UpperCamelCase )
lowerCamelCase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : int ):
lowerCamelCase_ = TFDebertaVaForMaskedLM(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFDebertaVaForSequenceClassification(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFDebertaVaForTokenClassification(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : int ):
lowerCamelCase_ = TFDebertaVaForQuestionAnswering(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
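# The test class below runs the shared TFModelTesterMixin and PipelineTesterMixin suites
# against every TF DeBERTa-v2 head listed in `all_model_classes`.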
@require_tf
class __A( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def lowercase__ ( self : Dict ):
lowerCamelCase_ = TFDebertaVaModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : int ):
lowerCamelCase_ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class __A( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def lowercase__ ( self : Optional[int] ):
pass
@slow
def lowercase__ ( self : Any ):
lowerCamelCase_ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
lowerCamelCase_ = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCamelCase_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
lowerCamelCase_ = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 )
| 272 | 1 |
'''simple docstring'''
def _lowerCamelCase ( num ) -> str:
    '''Convert an integer to its binary string representation.'''
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e ) for e in binary )

    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Any , *__a : Optional[Any] , **__a : Any ):
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
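# Minimal migration sketch (an illustration, assuming the current transformers API and the
# public "facebook/flava-full" checkpoint):
#
#     from transformers import FlavaImageProcessor
#     image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")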
| 521 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE : Union[str, Any] = get_tests_dir("fixtures")
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# A mock response for an HTTP head request to emulate server down
lowercase_ :str = mock.Mock()
lowercase_ :Dict = 500
lowercase_ :List[Any] = {}
lowercase_ :str = HTTPError
lowercase_ :Optional[int] = {}
# Download this model to make sure it's in the cache.
lowercase_ :Any = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase_ ) as mock_head:
lowercase_ :Dict = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
lowercase_ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase ( cls ):
lowercase_ :Optional[Any] = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def UpperCamelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def UpperCamelCase ( self ):
lowercase_ :int = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
lowercase_ :List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id='''test-feature-extractor''' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
lowercase_ :Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def UpperCamelCase ( self ):
lowercase_ :Tuple = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
lowercase_ :str = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
lowercase_ :Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def UpperCamelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
lowercase_ :str = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
lowercase_ :str = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=UpperCamelCase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 257 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray ) -> bool:
    '''Checks whether a matrix equals its own conjugate transpose.'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    '''Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix `a` and vector `v`.'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 257 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    '''Configuration class to store the configuration of a LUKE model.'''
    model_type = """luke"""
    def __init__( self , vocab_size=50_267 , entity_vocab_size=500_000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    '''Configuration class to store the configuration of a GPT-NeoX model.'''
    model_type = """gpt_neox"""
    def __init__( self , vocab_size=50_432 , hidden_size=6_144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24_576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10_000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2_048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 387 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def UpperCamelCase_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) )
lowerCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase__ = {"""unk_token""": """[UNK]"""}
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCamelCase__ = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = tokens + [tokenizer.unk_token]
lowerCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = tokenizer("""Hello""" ,"""World""" )
lowerCamelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(
"""sequence builders""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(
"""sequence builders""" ,"""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase__ = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
lowerCamelCase__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
lowerCamelCase__ = tokenizer(_lowerCAmelCase ,padding=_lowerCAmelCase )
lowerCamelCase__ = [tokenizer.decode(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ) for seq in encoding["""input_ids"""]]
# fmt: off
lowerCamelCase__ = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data ,_lowerCAmelCase )
for expected, decoded in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )
| 50 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , __a ):
def A_ (self ) -> Tuple:
UpperCamelCase_ : Any = load_tool("""text-to-speech""" )
self.tool.setup()
def A_ (self ) -> Dict:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] = self.tool("""hey""" )
UpperCamelCase_ : Optional[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def A_ (self ) -> Dict:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCamelCase_ : Any = self.tool("""hey""" )
UpperCamelCase_ : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 635 | 0 |
def power(base , exponent ) -> float:
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
__lowerCAmelCase : str = int(input("Enter the base: ").strip())
__lowerCAmelCase : Optional[Any] = int(input("Enter the exponent: ").strip())
__lowerCAmelCase : Any = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
__lowerCAmelCase : int = 1 / result
print(F'{base} to the power of {exponent} is {result}')
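# An alternative sketch (an illustration, not part of the original script): exponentiation
# by squaring cuts the recursion depth from O(exponent) to O(log exponent).
#
#     def fast_power(base: int, exponent: int) -> int:
#         if exponent == 0:
#             return 1
#         half = fast_power(base, exponent // 2)
#         return half * half * (base if exponent % 2 else 1)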
| 702 |
from math import factorial
__lowerCAmelCase : Dict = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n ) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution() -> int:
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
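# Why 7 * 9! + 1 bounds the search: a d-digit number is at least 10 ** (d - 1), but its
# digit-factorial sum is at most d * 9!; already for d = 8, 8 * 9! = 2_903_040 < 10_000_000,
# so no number with 8 or more digits can equal its own sum. 7 * 9! + 1 = 2_540_161.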
if __name__ == "__main__":
print(F'{solution() = }')
| 284 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
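# The try/except above is the standard diffusers optional-dependency guard: when torch or
# transformers is unavailable, dummy placeholder objects are imported instead so that
# importing the package itself never fails.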
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase : List[str] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
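# `_LazyModule` defers the torch-backed imports declared above until one of the exported
# names is first accessed, keeping `import transformers` fast when GPTBigCode is unused.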
| 202 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "perceiver"
def __init__( self , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=768 , _UpperCAmelCase=1 , _UpperCAmelCase=26 , _UpperCAmelCase=8 , _UpperCAmelCase=8 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="kv" , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=262 , _UpperCAmelCase=2048 , _UpperCAmelCase=56 , _UpperCAmelCase=[368, 496] , _UpperCAmelCase=16 , _UpperCAmelCase=1920 , _UpperCAmelCase=16 , _UpperCAmelCase=[1, 16, 224, 224] , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
UpperCAmelCase__ : int = num_latents
UpperCAmelCase__ : Optional[int] = d_latents
UpperCAmelCase__ : Optional[Any] = d_model
UpperCAmelCase__ : List[Any] = num_blocks
UpperCAmelCase__ : int = num_self_attends_per_block
UpperCAmelCase__ : List[str] = num_self_attention_heads
UpperCAmelCase__ : Optional[int] = num_cross_attention_heads
UpperCAmelCase__ : List[str] = qk_channels
UpperCAmelCase__ : Union[str, Any] = v_channels
UpperCAmelCase__ : int = cross_attention_shape_for_attention
UpperCAmelCase__ : Union[str, Any] = self_attention_widening_factor
UpperCAmelCase__ : List[str] = cross_attention_widening_factor
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : Optional[Any] = layer_norm_eps
UpperCAmelCase__ : List[str] = use_query_residual
# masked language modeling attributes
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
# image classification attributes
UpperCAmelCase__ : Union[str, Any] = image_size
# flow attributes
UpperCAmelCase__ : Union[str, Any] = train_size
# multimodal autoencoding attributes
UpperCAmelCase__ : Union[str, Any] = num_frames
UpperCAmelCase__ : Dict = audio_samples_per_frame
UpperCAmelCase__ : Optional[int] = samples_per_patch
UpperCAmelCase__ : str = output_shape
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase ( self ):
if self.task == "multiple-choice":
UpperCAmelCase__ : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def lowerCamelCase ( self ):
return 1E-4
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 3 , _UpperCAmelCase = 40 , _UpperCAmelCase = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ : Union[str, Any] = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ : str = preprocessor.num_special_tokens_to_add(_UpperCAmelCase )
UpperCAmelCase__ : str = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ : str = [''' '''.join(['''a'''] ) * seq_length] * batch_size
UpperCAmelCase__ : List[str] = dict(preprocessor(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) )
UpperCAmelCase__ : str = inputs.pop('''input_ids''' )
return inputs
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ : Union[str, Any] = compute_effective_axis_dimension(_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase__ : Union[str, Any] = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ : Tuple = dict(preprocessor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) )
UpperCAmelCase__ : Optional[Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 599 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , _UpperCAmelCase , )
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 599 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase = get_tests_dir('fixtures')
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> Tuple:
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 500
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A_ ) as mock_head:
lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
    def test_legacy_load_from_url( self ) -> Tuple:
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class __snake_case( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass( cls ) -> str:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
    def test_push_to_hub( self ) -> Tuple:
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id="""test-feature-extractor""" , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization( self ) -> Optional[Any]:
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ) -> Optional[Any]:
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
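# Hedged usage sketch (script filename and all paths below are placeholders,
# not taken from the original):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./converted-pipeline --to_safetensors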
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule( temperature: float , molar_mass: float ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 3_0_0
    molar_mass = 2_8
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
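    # Hedged unit note (not in the original): the formula above is
    # v_rms = sqrt(3 * R * T / M) with M in kg/mol. Passing molar_mass = 28
    # (grams per mole for N2) therefore prints ~16.35 m/s; pass 0.028 kg/mol
    # to get the physical ~516.9 m/s.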
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A__ : Dict = logging.get_logger(__name__)
class __magic_name__ ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , do_rescale = True , rescale_factor = 1 / 255 , crop_size = None , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
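    # Hedged usage sketch (synthetic input; class and method names as defined
    # above, output shape assumes the 224x224 defaults):
    #   processor = __magic_name__()
    #   batch = processor(images=np.zeros((224, 224, 3), dtype=np.uint8))
    #   batch["pixel_values"][0].shape  # (3, 224, 224) after resize, crop, CHW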
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ) -> Union[str, Any]:
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def get_yolos_config( yolos_name: str ) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def read_in_q_k_v( state_dict: dict , config: YolosConfig , base_model: bool = False ) -> str:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key( name: str ) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def convert_state_dict( orig_state_dict: dict , model: YolosForObjectDetection ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ) -> str:
    """simple docstring"""
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
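# Hedged usage sketch (script filename and paths are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small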
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid: str = "isbn/0140328726" ) -> dict:
    """simple docstring"""
    new_olid = olid.strip().strip("/" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/" ) != 1:
        msg = F"{olid} is not a valid Open Library olid"
        raise ValueError(msg )
    return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
def summarize_book( ol_book_data: dict ) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ", ".join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print('\n'.join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
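# Hedged extra (not part of the original file): a stricter ISBN-13 checksum
# test that the loop above could use in place of the plain length/digit check
# (standard ISBN-13 rule; the sample value is the ISBN-13 form of the default
# olid isbn/0140328726 used above).
def _isbn13_is_valid(isbn: str) -> bool:
    if len(isbn) != 13 or not isbn.isdigit():
        return False
    total = sum((1 if i % 2 == 0 else 3) * int(digit) for i, digit in enumerate(isbn))
    return total % 10 == 0


assert _isbn13_is_valid('9780140328721')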
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_input_types( self ):
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def test_check_illegal_input( self ):
        """simple docstring"""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def test_example_progression( self ):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
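# Hedged recap (not part of the tests above): a DisjunctiveConstraint is
# fulfilled as soon as ANY one of its branches appears in full, which the
# final sequence of the last test satisfies:
branches = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
assert any([1, 2, 5] == branch for branch in branches)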
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims( tree ) ->List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError('Not supported' )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx( flat_idx , dims ) ->Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
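# Hand-checked example for _flat_idx_to_idx above: with dims = (2, 3), flat
# index 4 maps to the multi-index (1, 1), since 4 = 1 * 3 + 1.
assert _flat_idx_to_idx(4 , (2, 3) ) == (1, 1)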
@torch.jit.ignore
def _get_minimal_slice_set( start , end , dims , start_edges = None , end_edges = None , ) ->List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path = tuple(path_list )
    divergence_idx = len(path_list )
    # start == end, and we're done
    if divergence_idx == len(start ):
        return [path]
return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice( t , flat_start , flat_end , no_batch_dims ) ->torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer( layer: Callable , inputs: Dict[str, Any] , chunk_size: int , no_batch_dims: int , low_mem: bool = False , _out: Any = None , _add_into_out: bool = False , ) ->Any:
    if not (len(inputs ) > 0):
        raise ValueError('Must provide at least one input' )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )

    def _prep_inputs(t ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):

            def assign(da , db ) -> None:
                for k, v in da.items():
                    if isinstance(v , dict ):
                        assign(v , db[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]

            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported' )
        i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
    return out
class __snake_case :
    def __init__( self , max_chunk_size = 512 , ) -> Tuple:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size( self , fn , args , min_chunk_size ) -> int:
        logging.info('Tuning chunk size...' )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self , aca , acb ) -> bool:
        consistent = True
        for aa, ab in zip(aca , acb ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                aa_items = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                ab_items = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(aa_items , ab_items )
            else:
                consistent &= aa == ab
        return consistent
    def tune_chunk_size( self , representative_fn , args , min_chunk_size , ) -> int:
        consistent = True
        arg_data = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
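# Hedged illustration (not part of the original module): chunk_layer above
# boils down to running a function over fixed-size slices of a flattened batch
# and stitching the results back together, as this tiny stand-in shows.
def _sketch_chunk(fn, x, chunk_size):
    return torch.cat([fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)] , dim=0 )


assert torch.equal(_sketch_chunk(torch.sigmoid , torch.zeros(10 , 4 ) , 3 ) , torch.sigmoid(torch.zeros(10 , 4 ) ) )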
'''simple docstring'''
from math import log2 as loga
def __lowerCamelCase ( a: int ) ->int:
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
        raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
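# Hand-checked examples for the helper above: 36 = 0b100100 has its lowest set
# bit at index 2, and 8 = 0b1000 at index 3.
assert __lowerCamelCase(36 ) == 2
assert __lowerCamelCase(8 ) == 3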
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mbart"""] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mbart_fast"""] = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mbart"""] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mbart"""] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mbart"""] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
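    # Hedged note (not part of the original module): _LazyModule defers the
    # heavy imports declared in _import_structure until an attribute is first
    # accessed, in the spirit of a PEP 562 module-level __getattr__ such as:
    #   def __getattr__(name):
    #       module = importlib.import_module("." + _module_for[name], __name__)
    #       return getattr(module, name)
    # (_module_for here is a hypothetical name -> submodule lookup table.)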
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=3_0522, type=int)
    args = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
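    # Hedged micro-example of the counting loop above (synthetic token ids,
    # not from the dataset): the two sequences [5, 7, 5] and [7] would yield
    # Counter({5: 2, 7: 2}), so counts[5] == 2 and counts[7] == 2.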
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create( cls ) -> int:
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput ):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin , ConfigMixin ):
    @property
    def has_state( self ) -> Dict:
        return True

    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 1_0_0 , s_noise: float = 1.007 , s_churn: float = 8_0 , s_min: float = 0.05 , s_max: float = 5_0 , ) -> List[str]:
pass
    def create_state( self ) -> Any:
        return KarrasVeSchedulerState.create()

    def set_timesteps( self , state: KarrasVeSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
    def add_noise_to_input( self , state: KarrasVeSchedulerState , sample: jnp.ndarray , sigma: float , key: random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , state: KarrasVeSchedulerState , model_output: jnp.ndarray , sigma_hat: float , sigma_prev: float , sample_hat: jnp.ndarray , return_dict: bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def step_correct( self , state: KarrasVeSchedulerState , model_output: jnp.ndarray , sigma_hat: float , sigma_prev: float , sample_hat: jnp.ndarray , sample_prev: jnp.ndarray , derivative: jnp.ndarray , return_dict: bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
raise NotImplementedError()
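    # Hedged note (not in the original): the schedule built in set_timesteps
    # above stores sigma^2 values spaced geometrically between sigma_max^2 and
    # sigma_min^2, i.e. the noise level follows
    #   sigma = sigma_max * (sigma_min / sigma_max) ** (i / (N - 1)).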
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline( self , model , tokenizer , processor ) -> List[Any]:
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )
        # test with a raw waveform
        audio = np.zeros((3_4_0_0_0,) )
        audioa = np.zeros((1_4_0_0_0,) )
        return audio_classifier, [audioa, audio]
    def run_pipeline_test( self , audio_classifier , examples ) -> Dict:
        audioa, audio = examples
        outputs = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            outputs , [
                {"score": ANY(float ), "label": ANY(str )},
                {"score": ANY(float ), "label": ANY(str )},
            ] , )
        outputs = audio_classifier(audio , top_k=1 )
        self.assertEqual(
            outputs , [
                {"score": ANY(float ), "label": ANY(str )},
            ] , )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio( self , audio_classifier ) -> Optional[Any]:
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        audio = dataset[0]["audio"]["array"]
        outputs = audio_classifier(audio )
        self.assertEqual(
            outputs , [
                {"score": ANY(float ), "label": ANY(str )},
                {"score": ANY(float ), "label": ANY(str )},
            ] , )
@require_torch
    def test_small_model_pt( self ) -> int:
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification" , model=model )
        audio = np.ones((8_0_0_0,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
            {"score": 0.0_842, "label": "no"},
            {"score": 0.0_838, "label": "up"},
            {"score": 0.0_837, "label": "go"},
            {"score": 0.0_834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0_845, "label": "stop"},
            {"score": 0.0_844, "label": "on"},
            {"score": 0.0_841, "label": "right"},
            {"score": 0.0_834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {"array": np.ones((8_0_0_0,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
    def test_large_model_pt( self ) -> Optional[Any]:
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification" , model=model )
        dataset = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
        audio = np.array(dataset[3]["speech"] , dtype=np.float32 )
        output = audio_classifier(audio , top_k=4 )
        self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
    def test_small_model_tf( self ) -> Dict:
pass
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__A = logging.get_logger(__name__)
class _snake_case ( a__ ):
    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
    def pad( self , processed_features: Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , padding: Union[bool, str, PaddingStrategy] = True , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    F"""type of {first_element} unknown: {type(first_element )}. """
                    "Should be one of a python, numpy, pytorch or tensorflow object." )

        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
# Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ):
__lowerCamelCase : List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowerCamelCase : Optional[Any] = len(UpperCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase : Optional[int] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowerCamelCase : List[str] = np.ones(len(UpperCAmelCase ) , dtype=np.intaa )
if needs_to_be_padded:
__lowerCamelCase : Optional[Any] = max_length - len(UpperCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
__lowerCamelCase : int = np.pad(
processed_features["attention_mask"] , (0, difference) )
__lowerCamelCase : str = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowerCamelCase : Optional[Any] = np.pad(
UpperCAmelCase , UpperCAmelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowerCamelCase : int = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__lowerCamelCase : List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowerCamelCase : Union[str, Any] = np.pad(
UpperCAmelCase , UpperCAmelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__lowerCamelCase : Dict = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase : Union[str, Any] = len(UpperCAmelCase ) > max_length
if needs_to_be_truncated:
__lowerCamelCase : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowerCamelCase : List[Any] = processed_features["attention_mask"][:max_length]
return processed_features
def lowerCamelCase__ ( self : str , UpperCAmelCase : Tuple=False , UpperCAmelCase : List[Any]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__lowerCamelCase : int = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Any = PaddingStrategy(UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Optional[Any] = padding
else:
__lowerCamelCase : Optional[int] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
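# A minimal standalone sketch (assumed shapes and values, not part of the class above)
# of the right-padding scheme `_pad` implements for 1-D features: pad the values with
# `padding_value` and extend the attention mask with zeros.
if __name__ == "__main__":
    import numpy as np

    feature = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    attention_mask = np.ones(len(feature), dtype=np.int32)
    max_length, padding_value = 5, 0.0
    difference = max_length - len(feature)
    padded = np.pad(feature, (0, difference), "constant", constant_values=padding_value)
    mask = np.pad(attention_mask, (0, difference))
    assert padded.tolist() == [1.0, 2.0, 3.0, 0.0, 0.0]
    assert mask.tolist() == [1, 1, 1, 0, 0]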
| 701 | """simple docstring"""
import os
def solution() -> int:
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
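# Worked example of the same bottom-up DP on a tiny hard-coded triangle (no file I/O):
# with rows = [[3], [7, 4], [2, 4, 6]] the last row becomes [12, 14, 13], so the best
# top-to-bottom path sum is 14 (the path 3 -> 7 -> 4).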
if __name__ == "__main__":
print(solution()) | 366 | 0 |
"""simple docstring"""
def infix_2_postfix( infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8) , '''Stack'''.center(print_width) , '''Postfix'''.center(print_width) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8) , (''''''.join(stack)).ljust(print_width) , (''''''.join(post_fix)).ljust(print_width) , sep=''' | ''' , )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ''' '''.center(8) , (''''''.join(stack)).ljust(print_width) , (''''''.join(post_fix)).ljust(print_width) , sep=''' | ''' , )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix( infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ''')'''  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
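# Worked example: infix_2_prefix("a+b*(c^d-e)") prints the conversion table and returns
# "+a*b-^cde" (reverse the input, swap parentheses, convert to postfix, reverse again).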
if __name__ == "__main__":
    Infix = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
    Infix = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''') | 52 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_sorted( self ):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_negative_weight_value( self ):
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
    def test_negative_profit_value( self ):
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
    def test_null_max_weight( self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_unequal_list_length( self ):
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main() | 52 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , lowerCamelCase : int = 128 , lowerCamelCase : int = 256 , lowerCamelCase : float = 2000.0 , lowerCamelCase : int = 768 , lowerCamelCase : int = 12 , lowerCamelCase : int = 12 , lowerCamelCase : int = 64 , lowerCamelCase : int = 2048 , lowerCamelCase : float = 0.1 , ) -> str:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Sequential(
nn.Linear(lowerCamelCase , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , )
_UpperCAmelCase = nn.Embedding(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = False
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(p=lowerCamelCase )
_UpperCAmelCase = nn.ModuleList()
for lyr_num in range(lowerCamelCase ):
# FiLM conditional T5 decoder
_UpperCAmelCase = DecoderLayer(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
self.decoders.append(lowerCamelCase )
_UpperCAmelCase = TaLayerNorm(lowerCamelCase )
_UpperCAmelCase = nn.Dropout(p=lowerCamelCase )
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : str ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> Tuple:
"""simple docstring"""
        batch , _ , _ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_UpperCAmelCase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_UpperCAmelCase = self.conditioning_emb(lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_UpperCAmelCase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_UpperCAmelCase = torch.broadcast_to(
torch.arange(lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_UpperCAmelCase = self.position_encoding(lowerCamelCase )
_UpperCAmelCase = self.continuous_inputs_projection(lowerCamelCase )
inputs += position_encodings
_UpperCAmelCase = self.dropout(lowerCamelCase )
# decoder: No padding present.
_UpperCAmelCase = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_UpperCAmelCase = [(x, self.encoder_decoder_mask(lowerCamelCase , lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_UpperCAmelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_UpperCAmelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_UpperCAmelCase = lyr(
lowerCamelCase , conditioning_emb=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )[0]
_UpperCAmelCase = self.decoder_norm(lowerCamelCase )
_UpperCAmelCase = self.post_dropout(lowerCamelCase )
_UpperCAmelCase = self.spec_out(lowerCamelCase )
return spec_out
class DecoderLayer( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any=1E-6 ) -> int:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase ) )
def lowerCamelCase ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : Any=None , lowerCamelCase : Optional[int]=None , ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.layer[0](
lowerCamelCase , conditioning_emb=lowerCamelCase , attention_mask=lowerCamelCase , )
if encoder_hidden_states is not None:
_UpperCAmelCase = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_UpperCAmelCase = self.layer[1](
lowerCamelCase , key_value_states=lowerCamelCase , attention_mask=lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_UpperCAmelCase = self.layer[-1](lowerCamelCase , lowerCamelCase )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = TaLayerNorm(lowerCamelCase )
_UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
_UpperCAmelCase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(lowerCamelCase )
def lowerCamelCase ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Dict=None , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
_UpperCAmelCase = self.FiLMLayer(lowerCamelCase , lowerCamelCase )
# Self-attention block
_UpperCAmelCase = self.attention(lowerCamelCase )
_UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class TaLayerCrossAttention( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
_UpperCAmelCase = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(lowerCamelCase )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : int=None , lowerCamelCase : Optional[Any]=None , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.layer_norm(lowerCamelCase )
_UpperCAmelCase = self.attention(
lowerCamelCase , encoder_hidden_states=lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase )
return layer_output
class TaLayerFFCond( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = TaDenseGatedActDense(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
_UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
_UpperCAmelCase = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(lowerCamelCase )
def lowerCamelCase ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : str=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
_UpperCAmelCase = self.film(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = self.DenseReluDense(lowerCamelCase )
_UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class TaDenseGatedActDense( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[str] ) -> Any:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(lowerCamelCase )
_UpperCAmelCase = NewGELUActivation()
def lowerCamelCase ( self : Tuple , lowerCamelCase : Dict ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.act(self.wi_a(lowerCamelCase ) )
_UpperCAmelCase = self.wi_a(lowerCamelCase )
_UpperCAmelCase = hidden_gelu * hidden_linear
_UpperCAmelCase = self.dropout(lowerCamelCase )
_UpperCAmelCase = self.wo(lowerCamelCase )
return hidden_states
class TaLayerNorm( nn.Module ):
'''simple docstring'''
def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : str=1E-6 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Parameter(torch.ones(lowerCamelCase ) )
_UpperCAmelCase = eps
def lowerCamelCase ( self : List[str] , lowerCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
'''simple docstring'''
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(lowerCamelCase , 3.0 )) ))
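# The tanh expression above is the standard approximation to exact GELU; a quick check
# (assumed tolerance) is:
#     x = torch.linspace(-3, 3, 7)
#     approx = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
#     torch.allclose(approx, torch.nn.functional.gelu(x), atol=1e-3)  # expected True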
class TaFiLMLayer( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Any , lowerCamelCase : Any ) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(lowerCamelCase , out_features * 2 , bias=lowerCamelCase )
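    # FiLM (feature-wise linear modulation): the single projection above emits both
    # parameters at once; `forward` chunks them into a per-feature scale and shift and
    # applies x * (1 + scale) + shift.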
def lowerCamelCase ( self : int , lowerCamelCase : str , lowerCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.scale_bias(lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = torch.chunk(lowerCamelCase , 2 , -1 )
_UpperCAmelCase = x * (1 + scale) + shift
return x | 715 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a: Optional[Any] = logging.get_logger(__name__)
__a: str = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = '''bit'''
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
_lowerCamelCase = ['''SAME''', '''VALID''']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names ) | 402 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
__SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase__ ( self : int , **_A : Any ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self : Optional[int] , **_A : Optional[Any] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self : Union[str, Any] , **_A : Optional[int] ):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor(do_normalize=_A )
__SCREAMING_SNAKE_CASE : str = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = image_processor(_A , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : List[str] = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : str = '''Alexandra,T-shirt的价格是15便士。'''
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=_A )
__SCREAMING_SNAKE_CASE : str = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。'''
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : int = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : str = processor.batch_decode(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。'''
__SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 74 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number: int ) -> bool:
    """simple docstring"""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
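# e.g. add_three(1, 3, 1, 6, 1, 2) == (1, 1), since 1/3 + 1/6 + 1/2 reduces to 1/1.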
def solution( order: int = 35 ) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 580 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input , model , tokenizer , topk=5 ):
    """simple docstring"""
    assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
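# Each returned tuple is (sentence with the mask filled in, probability, predicted token).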
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 710 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''''''
IMG_DIR = ''''''
OUTPUT_DIR = ''''''
NUMBER_IMAGES = 250
def main() -> None:
    """simple docstring"""
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(F"{file_root}.jpg" , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj )
        with open(F"{file_root}.txt" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir , img_dir ):
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
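# Label files follow the YOLO convention, `class x_center y_center width height` with
# normalized coordinates; get_dataset converts each box to corner coordinates
# (xmin, ymin, xmax, ymax) for the mosaic composition below.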
def update_image_and_anno( all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    """simple docstring"""
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars( number_char: int ) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 624 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
'''simple docstring'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
    order = 2
@register_to_config
def __init__( self : List[Any] , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 100 , _UpperCAmelCase : float = 1.007 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 0.05 , _UpperCAmelCase : float = 50 , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = sigma_max
# setable values
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None # sigma(t_i)
def lowercase__ ( self : List[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def lowercase__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, torch.device] = None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = np.arange(0 , self.num_inference_steps )[::-1].copy()
UpperCAmelCase_ = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCAmelCase_ = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
        UpperCAmelCase_ = torch.tensor(_UpperCAmelCase , dtype=torch.float32 , device=_UpperCAmelCase )
def lowercase__ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float , _UpperCAmelCase : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ = self.config.s_noise * randn_tensor(sample.shape , generator=_UpperCAmelCase ).to(sample.device )
UpperCAmelCase_ = sigma + gamma * sigma
UpperCAmelCase_ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
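    # The next method is the deterministic Euler update of the Karras et al. (2022)
    # sampler: reconstruct the denoised sample, take
    # derivative = (sample_hat - denoised) / sigma_hat, and move from sigma_hat to
    # sigma_prev along it, returning a KarrasVeOutput.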
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
UpperCAmelCase_ = sample_hat + sigma_hat * model_output
UpperCAmelCase_ = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_UpperCAmelCase , derivative=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
UpperCAmelCase_ = sample_prev + sigma_prev * model_output
UpperCAmelCase_ = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_UpperCAmelCase , derivative=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
def lowercase__ ( self : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
| 82 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__A = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class A ( unittest.TestCase ):
@classmethod
def A__ ( cls ) -> List[Any]:
'''simple docstring'''
lowercase__ = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def A__ ( cls ) -> Tuple:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowercase__ = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
lowercase__ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
lowercase__ = flatten_dict(unfreeze(model.params ) )
lowercase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ , repo_id="""test-model-flax""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
lowercase__ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
lowercase__ = flatten_dict(unfreeze(model.params ) )
lowercase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1e-3 , msg=F'''{key} not identical''' )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowercase__ = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
lowercase__ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
lowercase__ = flatten_dict(unfreeze(model.params ) )
lowercase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase__ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
lowercase__ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
lowercase__ = flatten_dict(unfreeze(model.params ) )
lowercase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1e-3 , msg=F'''{key} not identical''' )
def check_models_equal( model_a , model_b ):
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class A ( unittest.TestCase ):
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowercase__ = FlaxBertModel(lowerCamelCase__ )
lowercase__ = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )
with self.assertRaises(lowerCamelCase__ ):
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ )
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__ , lowerCamelCase__ ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowercase__ = FlaxBertModel(lowerCamelCase__ )
lowercase__ = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , max_shard_size="""10KB""" )
with self.assertRaises(lowerCamelCase__ ):
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ )
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__ , lowerCamelCase__ ) )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = """bert"""
lowercase__ = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowerCamelCase__ ):
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ )
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = """bert"""
lowercase__ = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowerCamelCase__ ):
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ )
lowercase__ = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 325 | 0 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array( array: list[int] , k: int ) -> int:
    """simple docstring"""
    if len(array ) < k or k < 0:
        raise ValueError('Invalid Input' )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
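# e.g. max_sum_in_array([1, 2, 3, 4, 5], k=2) == 9, from the window [4, 5].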
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
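# e.g. for the weighted graph {0: [[1, 2], [2, 5]], 1: [[0, 2], [2, 1]], 2: [[0, 5], [1, 1]]}
# (vertex: [[neighbor, weight], ...]) the returned MST edges are [(0, 1), (1, 2)].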
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 1 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_A = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def a__ ( pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint )
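# e.g. a__("numpy") re-validates the pinned numpy requirement on demand; the optional
# second argument is forwarded to require_version as an extra hint for the error message.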
| 182 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__snake_case : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Dict = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
__snake_case : str = {
'unc-nlp/lxmert-base-uncased': 512,
}
__snake_case : int = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = LxmertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=None ) -> Optional[int]:
'''simple docstring'''
A__ : int =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ : Union[str, Any] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
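# A hedged usage sketch (checkpoint name taken from the map above; the token
# ids themselves are not reproduced here):
# tok = LxmertTokenizerFast.from_pretrained('unc-nlp/lxmert-base-uncased')
# enc = tok('a photo of a cat', 'what is shown?')
# enc['token_type_ids'] marks the first segment with 0s and the second with 1s,
# exactly as create_token_type_ids_from_sequences computes.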
| 215 | 0 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first nth_term terms of the P-Series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
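# A quick worked example (values follow directly from the loop above):
# p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'],
# i.e. the first five terms of 1 + 1/2^2 + 1/3^2 + ... for p = 2.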
| 428 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
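# A hedged usage sketch: the config is a plain container, so any default above
# can be overridden at construction time (values shown mirror the defaults):
# config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
# config.num_meta3d_blocks -> 1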
| 428 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
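# A hedged sketch of how attribute_map above behaves: reading a mapped generic
# attribute resolves to the decoder-specific one, so
# config = TrOCRConfig(d_model=256)
# config.hidden_size -> 256  (alias of d_model)
# config.num_hidden_layers -> 12  (alias of decoder_layers)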
| 428 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
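# A quick usage note: on a 5x5 board an open knight's tour exists, so
# open_knight_tour(5) returns a board whose entries 1..25 give the visit order,
# while open_knight_tour(2) raises ValueError because no tour can complete.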
| 43 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 606 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
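# A hedged sketch of what the `accumulate` helper amounts to numerically:
# losses from `gradient_accumulation_steps` micro-batches are scaled down and
# their gradients summed before a single optimizer step (the helper also skips
# gradient synchronization on non-step iterations in distributed runs).
# for step, batch in enumerate(train_dataloader):
#     loss = model(**batch).loss / gradient_accumulation_steps
#     accelerator.backward(loss)
#     if (step + 1) % gradient_accumulation_steps == 0:
#         optimizer.step()
#         lr_scheduler.step()
#         optimizer.zero_grad()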
| 606 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
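# A quick worked check of the recurrence above: a subsequence need not be
# contiguous, so every positive element can be kept.
# max_subsequence_sum([1, 2, 3, 4, -2]) -> 10  (take 1 + 2 + 3 + 4)
# max_subsequence_sum([-2, -3, -1, -4, -6]) -> -1  (best single element)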
| 78 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy (fractional) knapsack: repeatedly take the item with the highest
    profit/weight ratio until max_weight is reached."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark this ratio as used so .index() finds the next occurrence later
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
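# A quick worked example of the greedy fill above (checked by hand):
# calc_profit([10, 9, 8], [5, 6, 4], 10) -> 19.5
# Ratios are [2.0, 1.5, 2.0]; items 1 and 3 fit whole (gain 18, weight 9),
# then 1 of the remaining 6 kg of item 2 adds 9 * 1/6 = 1.5.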
| 454 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__a: Union[str, Any] = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """Image processor with resize -> center crop -> rescale -> normalize steps
    (class name inferred from the LeViT-style 256/224 resize rule below)."""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
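# A hedged usage sketch (the class name above is inferred, and the input image
# is illustrative):
# import numpy as np
# processor = LevitImageProcessor()
# batch = processor.preprocess(np.zeros((480, 640, 3), dtype=np.uint8), return_tensors='np')
# batch['pixel_values'].shape -> (1, 3, 224, 224) after resize -> crop -> rescale -> normalize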
| 710 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort in place by bubbling the largest value right, then the smallest left."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
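# A quick worked check: each outer pass bubbles the largest remaining value to
# the right, then the smallest to the left, so
# cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]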
| 7 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match model input names."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True, ) -> List[InputFeatures]:
        """Loads a list of `InputExample`s into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """Token classification dataset with on-disk feature caching."""

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f'Loading features from cached file {cached_features_file}')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f'Creating features from dataset file at {data_dir}')
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(f'Saving features into cached file {cached_features_file}')
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """TensorFlow dataset wrapper built from a generator over the features."""

        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), (
                        {'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ), )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64), (
                        {
                            'input_ids': tf.TensorShape([None]),
                            'attention_mask': tf.TensorShape([None]),
                            'token_type_ids': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ), )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 686 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}')
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print('Enter how many processes you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
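# A quick worked trace of the preemptive SRTF logic above (checked by hand):
# arrival_time = [0, 1, 2] and burst_time = [3, 1, 2] give
# waiting_time = [1, 0, 2] and turn_around_time = [4, 1, 4]; process 2 preempts
# process 1 at t = 1 because its remaining time (1) is shorter than 2.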
| 139 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }), codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'], reference_urls=[
                'https://en.wikipedia.org/wiki/ROUGE_(metric)',
                'https://github.com/google-research/google-research/tree/master/rouge',
            ], )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
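# A hedged usage sketch mirroring the docstring above (identical prediction and
# reference strings score exactly 1.0, so no numbers are invented):
# rouge = datasets.load_metric('rouge')
# results = rouge.compute(predictions=['hello there'], references=['hello there'])
# results['rouge1'].mid.fmeasure -> 1.0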
| 458 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece-style Unigram tokenizer built with the `tokenizers` library."""

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8_000, show_progress: bool = True, ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8_000, show_progress: bool = True, ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
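# A hedged usage sketch (the toy corpus and vocab size are illustrative):
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train_from_iterator(['some text', 'more text'], vocab_size=30)
# After training, add_unk_id() has patched the Unigram model's unk_id to 2,
# matching the special-token table defined in __init__.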
| 458 | 1 |