'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
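# Usage sketch (added for illustration; assumes this file is the groupvit
# `__init__.py` inside transformers/models). With the lazy-module pattern above,
# importing the package stays cheap and the heavy torch/TF backends are only
# imported when one of their symbols is first touched:
#
#     from transformers.models.groupvit import GroupViTConfig  # no torch import yet
#     from transformers.models.groupvit import GroupViTModel   # pulls in the torch modeling file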
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Creates a list of PIL images from random uint8 arrays in (C, H, W) layout.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'],
        )
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at position [index1, index2] of the polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of the message ('j' is folded into 'i', spaces dropped)."""
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of the message."""
        message = message.lower()
        message.replace(' ', '')
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""
    Pipeline that runs the same prompt through the Stable Diffusion v1.1-v1.4
    checkpoints so their outputs can be compared side by side.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_')}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = 'auto'):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
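# Usage sketch (added for illustration, not executed here because __init__
# downloads three extra full checkpoints; the community-pipeline id
# 'stable_diffusion_comparison' is an assumption about where this file lives):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         'CompVis/stable-diffusion-v1-4', custom_pipeline='stable_diffusion_comparison'
#     )
#     pipe.enable_attention_slicing()
#     images = pipe(prompt='an astronaut riding a horse on mars').images
#     # images holds one result per checkpoint v1.1, v1.2, v1.3 and v1.4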
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
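# Worked example (added for illustration), using the pairs from the docstring:
#   "this is the prediction"   vs "this is the reference" -> S=1, D=0, I=0, hits=3
#   "there is an other sample" vs "there is another one"  -> S=2, D=0, I=1, hits=2
# WER = (S + D + I) / (S + D + hits) = (3 + 0 + 1) / (3 + 0 + 5) = 4 / 8 = 0.5,
# matching the 0.5 printed in the docstring usage example above.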
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """Configuration class for a Speech2Text2 decoder model."""

    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray, source: tuple, destination: tuple, allow_diagonal: bool) -> tuple:
    """
    Return the shortest distance and path from source to destination in a
    binary grid (1 = passable cell), using Dijkstra's algorithm with unit
    edge weights.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
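# Usage sketch (added for illustration; 1 marks a passable cell):
#
#     grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#     dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # dist == 4.0, path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]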
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
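# Invocation sketch (added for illustration; the script filename and all paths
# are assumptions, not values from this file):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/tf_checkpoint \
#         --mobilebert_config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin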
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by the tokenizers library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by the tokenizers library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])


CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: 'DPRReaderOutput',
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer that encodes (question, title, text)
    triples as described in CUSTOM_DPR_READER_DOCSTRING.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
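# Usage sketch (added for illustration; mirrors the encode/decode contract
# described in CUSTOM_DPR_READER_DOCSTRING above):
#
#     from transformers import DPRReader
#     tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
#     model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
#     encoded_inputs = tokenizer(
#         questions='What is love?',
#         titles='Haddaway',
#         texts='What Is Love is a song recorded by the artist Haddaway',
#         return_tensors='pt',
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(predicted_spans[0].text)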
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A Prior Transformer model that predicts CLIP image embeddings from text embeddings."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = 'silu',
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = 'linear',
        added_emb_type: Optional[str] = 'prd',
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn='gelu',
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask', causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns a dictionary of all attention processors used in the model, indexed by weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, 'set_processor'):
                processors[f'{name}.processor'] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}', child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor)} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.'
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, 'set_processor'):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f'{name}.processor'))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
'''simple docstring'''
import os
def A_( A : Optional[Any] = "matrix.txt"):
with open(os.path.join(os.path.dirname(UpperCamelCase__) , UpperCamelCase__)) as in_file:
UpperCamelCase = in_file.read()
UpperCamelCase = [[int(UpperCamelCase__) for cell in row.split(',')] for row in data.strip().splitlines()]
UpperCamelCase = [[0 for cell in row] for row in grid]
UpperCamelCase = len(grid[0])
UpperCamelCase = [[0 for i in range(UpperCamelCase__)] for j in range(UpperCamelCase__)]
UpperCamelCase = grid[0][0]
for i in range(1 , UpperCamelCase__):
UpperCamelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 , UpperCamelCase__):
UpperCamelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 , UpperCamelCase__):
for j in range(1 , UpperCamelCase__):
UpperCamelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1])
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Return True if any window of ks matches flattened qs.
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
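
# Hedged usage sketch (toy parameter tree; a real tree comes from a Flax
# GPT-2-style model, and the array shape below is illustrative):
#
#     import numpy as np
#     params = {"transformer": {"wte": {"embedding": np.zeros((50257, 768))}}}
#     specs = set_partitions(params)
#     # specs["transformer"]["wte"]["embedding"] == PartitionSpec("mp", None)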
| 546 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source via downloadgram and return its bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 721 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt: a word is a triangle word when the
    sum of its letter values (A=1 .. Z=26) is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
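
# Worked example of the letter-value rule above: "SKY" -> 19 + 11 + 25 = 55, and
# 55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" counts as a triangle word.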
| 331 | 0 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
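
# Hedged usage sketch outside the test harness (values mirror the config above;
# the sample shape is illustrative):
#
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#     scheduler.set_timesteps(timesteps=[106, 0])  # two-step consistency sampling
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma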
| 27 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    # NOTE: the renamed qkv target keys below are an assumption based on the
    # standard HF Donut/Swin layout; the originals did not survive extraction.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
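
# Example invocation (hedged; the script filename, model id, and output path are
# illustrative, but the flags match the argparse definitions below):
#
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#         --pytorch_dump_folder_path ./donut-docvqa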
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowercase_ : Dict = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 588 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model, ordered by passage relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        """Find the best answer spans for one passage, sorted by span score,
        keeping at most top_spans non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm which only scales and doesn't shift (RMSNorm);
        # the variance is accumulated in fp32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation of `x` by a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
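
# Hedged numeric check of the FiLM rule above: with a zeroed conditioning
# projection, scale == shift == 0, so x * (1 + scale) + shift leaves x unchanged.
# All sizes below are illustrative.
def _film_identity_demo() -> None:
    film = T5FiLMLayer(in_features=8, out_features=4)
    nn.init.zeros_(film.scale_bias.weight)
    x = torch.randn(2, 3, 4)
    cond = torch.randn(2, 1, 8)
    out = film(x, cond)
    assert torch.allclose(out, x)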
| 16 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
write_movies()
| 654 |
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
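
# Truth table realized by the circuit (sum = XOR on clbit 0, carry = AND on
# clbit 1; the circuits are deterministic, so all 1000 shots should land on one
# outcome, read as "carry sum"):
#   half_adder(0, 0) -> {"00": 1000}
#   half_adder(0, 1) -> {"01": 1000}
#   half_adder(1, 0) -> {"01": 1000}
#   half_adder(1, 1) -> {"10": 1000}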
| 607 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 98 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Compute the area from the three side lengths using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
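
# Worked example of Heron's formula above: sides (5, 12, 13) give s = 15 and
# area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30, which matches the right-triangle
# area (5 * 12) / 2.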
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""") | 98 | 1 |
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 166 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate the child with a gene from the list.

    Assumption: the mutated position is a single random index, as in the common
    reference implementation; the original index expression did not survive extraction."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
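
# Hedged mini-walkthrough of the primitives above (hand-picked values):
#   evaluate("Helxo Worlx", "Hello World") -> ("Helxo Worlx", 9.0)  # 9 matching positions
#   crossover with slice point 3: ("Helxo", "Banana") -> ("Helana", "Banxo")
# (the real slice point is random, so the crossover pair is illustrative only)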
| 268 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCamelCase__ =["pixel_values"]
def __init__( self : int , lowerCamelCase_ : Dict = True , lowerCamelCase_ : str = None , lowerCamelCase_ : str = PILImageResampling.BICUBIC , lowerCamelCase_ : Optional[int] = True , lowerCamelCase_ : Tuple = True , lowerCamelCase_ : str = 1 / 255 , lowerCamelCase_ : Any = None , lowerCamelCase_ : List[Any] = True , lowerCamelCase_ : Dict = None , lowerCamelCase_ : Dict = None , **lowerCamelCase_ : Optional[Any] , ) -> None:
super().__init__(**_lowerCamelCase )
__magic_name__ : Tuple = size if size is not None else {'''height''': 224, '''width''': 224}
__magic_name__ : Optional[Any] = get_size_dict(_lowerCamelCase )
__magic_name__ : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__magic_name__ : Tuple = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='''crop_size''' )
__magic_name__ : Optional[Any] = do_resize
__magic_name__ : int = do_rescale
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : Optional[Any] = do_center_crop
__magic_name__ : Optional[int] = crop_size
__magic_name__ : List[str] = size
__magic_name__ : List[str] = resample
__magic_name__ : int = rescale_factor
__magic_name__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__magic_name__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self : str , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] = PILImageResampling.BILINEAR , lowerCamelCase_ : List[Any] = None , **lowerCamelCase_ : int , ) -> np.ndarray:
__magic_name__ : List[Any] = get_size_dict(_lowerCamelCase )
if "shortest_edge" in size:
__magic_name__ : Optional[int] = get_resize_output_image_size(_lowerCamelCase , size=size['''shortest_edge'''] , default_to_square=_lowerCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__magic_name__ : Optional[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] = None , **lowerCamelCase_ : List[Any] , ) -> np.ndarray:
__magic_name__ : List[Any] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] = None , **lowerCamelCase_ : Dict ) -> np.ndarray:
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : str , ) -> np.ndarray:
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : int = None , lowerCamelCase_ : Union[str, Any] = None , lowerCamelCase_ : int = None , lowerCamelCase_ : Any = None , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : Any = None , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None , lowerCamelCase_ : Optional[int] = ChannelDimension.FIRST , **lowerCamelCase_ : Optional[int] , ) -> BatchFeature:
__magic_name__ : Tuple = do_resize if do_resize is not None else self.do_resize
__magic_name__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__ : str = crop_size if crop_size is not None else self.crop_size
__magic_name__ : Union[str, Any] = get_size_dict(_lowerCamelCase , param_name='''crop_size''' , default_to_square=_lowerCamelCase )
__magic_name__ : Optional[int] = resample if resample is not None else self.resample
__magic_name__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : str = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Dict = image_std if image_std is not None else self.image_std
__magic_name__ : Optional[Any] = size if size is not None else self.size
__magic_name__ : int = get_size_dict(_lowerCamelCase )
if not is_batched(_lowerCamelCase ):
__magic_name__ : Union[str, Any] = [images]
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__magic_name__ : str = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
__magic_name__ : Dict = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
__magic_name__ : int = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
__magic_name__ : List[str] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
__magic_name__ : Dict = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
__magic_name__ : str = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
__magic_name__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
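# Usage sketch (illustrative; not part of the original module). `ImageProcessor`
# is the placeholder name restored above; the PIL/numpy usage is an assumption
# for demonstration only.
def _demo_preprocess():
    import numpy as np
    from PIL import Image

    processor = ImageProcessor()
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    batch = processor.preprocess(image, return_tensors="np")
    return batch["pixel_values"].shape  # e.g. (1, 3, 224, 224)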
| 712 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    # NOTE: class/method names were lost in this dump; descriptive names are restored.
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    # NOTE: attribute names are restored from how they are referenced below;
    # `return_stdout=True` is an assumption (the obfuscated source lost the literal).
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
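# Usage sketch (illustrative; not part of the original module). The tests above
# shell out to the `accelerate` CLI; a rough standard-library equivalent of one
# invocation is shown below. `subprocess` is an assumption here; the real helper
# is `run_command` from accelerate.test_utils.testing.
def _demo_tpu_config_debug() -> str:
    import subprocess

    result = subprocess.run(
        ["accelerate", "tpu-config", "--config_file", "tests/test_configs/latest.yaml", "--debug"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout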
| 501 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
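# Usage sketch (illustrative; not part of the original module). Shows how the
# config above maps `hidden_size` onto `d_model` via `attribute_map` and how it
# serializes.
def _demo_detr_config() -> dict:
    config = DetrConfig(num_queries=50, d_model=128)
    assert config.hidden_size == 128  # attribute_map routes hidden_size -> d_model
    return config.to_dict()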
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 86 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_A = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_A = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_A = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=["https://github.com/jhclark/tercom"],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 290 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
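# Usage sketch (illustrative; not part of the original module). The `full_loop`
# helper above mirrors the basic diffusers sampling pattern; here it is shown
# standalone with a random stand-in for the denoising model (shapes arbitrary).
def _demo_ddim_loop():
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a trained model
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample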
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
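# Usage sketch (illustrative; not part of the original module). A standalone
# version of the integration check above; requires flax and downloads weights
# from the Hub on first use.
def _demo_roformer_mlm():
    model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
    logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
    return logits.shape  # (1, 6, vocab_size)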
| 4 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
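# Illustrative stand-in (not part of the original module). `good_file_paths`
# comes from build_directory_md; a rough approximation of its behaviour, for
# readers of this snippet only, might look like this:
def _demo_good_file_paths(top_dir: str = "."):
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if not d.startswith(".")]  # skip hidden dirs
        for filename in filenames:
            if filename.endswith(".py"):
                yield os.path.join(dir_path, filename).lstrip("./")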
| 429 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(self.languages)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
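# Usage sketch (illustrative; not part of the original module). How a dataset
# schema typically declares this feature type, assuming the public `datasets`
# package is installed.
def _demo_translation_feature():
    from datasets import Dataset, Features

    features = Features({"translation": Translation(languages=["en", "fr"])})
    return Dataset.from_dict(
        {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
    )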
| 429 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 716 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (taxicab) distance between two points of equal dimension."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
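# Usage sketch (illustrative; not part of the original module). Expected
# behaviour of the helpers above:
def _demo_manhattan() -> None:
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance([1.5, 1.5], [2, 2]) == 1.0
    assert manhattan_distance_one_liner([1.5, 1.5], [2.5, 2]) == 1.5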
| 510 | 0 |
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole` using `with_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print("moving disk from", fp, "to", tp)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
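# Illustrative note (not part of the original module). The recursion above
# performs exactly 2**height - 1 moves; a quick arithmetic check:
def _count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * _count_moves(height - 1) + 1  # two sub-towers plus one disk move


assert _count_moves(3) == 7  # == 2**3 - 1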
| 14 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename flax keys and transpose tensors to the PyTorch layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE: the target key of this assignment was lost in the dump; replacing
        # "/" with "." matches the renaming done elsewhere in this script.
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
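# Usage sketch (illustrative; not part of the original script). The two size
# helpers used by `shard_on_the_fly`, in isolation:
def _demo_size_helpers():
    max_bytes = convert_file_size_to_int("10GB")  # decimal units: 10 * 10**9 bytes
    per_param = dtype_byte_size(torch.bfloat16)  # 2.0 bytes per element
    return max_bytes, per_param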
| 631 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader backed by a :class:`pyspark.sql.DataFrame`."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
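# Usage sketch (illustrative; not part of the original module). This reader
# backs `Dataset.from_spark`; a typical entry point, assuming a local
# SparkSession is available:
def _demo_from_spark():
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    return Dataset.from_spark(df)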
| 203 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """Return the Schur complement of block A in the matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )
    return mat_c - mat_b.T @ a_inv @ mat_b
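# Illustrative note (not part of the original module). The first unit test
# below relies on the block-determinant identity
# det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B); a quick numeric check:
def _demo_schur_identity() -> bool:
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [2.0]])
    c = np.array([[5.0]])
    s = schur_complement(a, b, c)
    block = np.block([[a, b], [b.T, c]])
    return bool(np.isclose(np.linalg.det(block), np.linalg.det(a) * np.linalg.det(s)))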
class TestSchurComplement(unittest.TestCase):
    # NOTE: method names were lost in the dump; the array values are kept
    # exactly as they appear in the source.
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 203 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
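

# Hedged usage sketch (illustration only, not part of the original file):
#
#   text_cfg = BlipTextConfig(num_hidden_layers=6)
#   vision_cfg = BlipVisionConfig(image_size=224)
#   config = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert config.to_dict()["vision_config"]["image_size"] == 224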
| 76 |
"""XLM configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
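

# Note (added comment): through `attribute_map` above, generic attribute
# lookups on the config are transparently redirected to the XLM-specific
# fields, e.g.:
#
#   config = XLMConfig()
#   assert config.hidden_size == config.emb_dim == 2048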
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 368 | 0 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class a__ ( logging.LoggerAdapter ):
@staticmethod
def lowerCamelCase_ ( _lowerCamelCase :Dict ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] =PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :List[Any] , _lowerCamelCase :Tuple , *_lowerCamelCase :Tuple , **_lowerCamelCase :int ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
UpperCamelCase_ : List[str] =kwargs.pop('main_process_only' , _lowerCamelCase )
UpperCamelCase_ : Tuple =kwargs.pop('in_order' , _lowerCamelCase )
if self.isEnabledFor(_lowerCamelCase ):
if self._should_log(_lowerCamelCase ):
UpperCamelCase_ : Optional[Any] =self.process(_lowerCamelCase , _lowerCamelCase )
self.logger.log(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
elif in_order:
UpperCamelCase_ : Any =PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
UpperCamelCase_ : str =self.process(_lowerCamelCase , _lowerCamelCase )
self.logger.log(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
state.wait_for_everyone()
def get_logger(name, log_level=None):
    """
    Return a `logging.Logger` for `name` wrapped in a `MultiProcessAdapter`,
    reading the default level from the `ACCELERATE_LOG_LEVEL` environment
    variable when `log_level` is not given.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
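

# Hedged usage sketch (illustration only):
#
#   logger = get_logger(__name__)
#   logger.info("logged once, from the main process only")
#   logger.info("logged from every process", main_process_only=False)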
| 709 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def A_ ( __lowercase = "https://www.worldometers.info/coronavirus" ):
UpperCamelCase_ : Dict =BeautifulSoup(requests.get(__lowercase ).text , 'html.parser' )
UpperCamelCase_ : List[Any] =soup.findAll('h1' )
UpperCamelCase_ : List[str] =soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowercase , __lowercase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 395 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 271 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 271 | 1 |
"""simple docstring"""
# Imports
import numpy as np
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
if red is not None:
__lowerCAmelCase : Optional[int] = red
if green is not None:
__lowerCAmelCase : Any = green
if blue is not None:
__lowerCAmelCase : str = blue
if red_edge is not None:
__lowerCAmelCase : Dict = red_edge
if nir is not None:
__lowerCAmelCase : Dict = nir
return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch to the requested vegetation index function by name."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
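
    # Hedged usage sketch (illustration only):
    #
    #   bands = IndexCalculation(red=red, green=green, blue=blue, nir=nir)
    #   ndvi_map = bands.calculation("NDVI")  # dispatches to self.ndvi()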
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 549 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops  # isort: skip
| 549 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = StableDiffusionXLImgaImgPipeline
__UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
__UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {'latents'}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
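
    # Note (added comment): `params` / `batch_params` tell the shared
    # PipelineTesterMixin harness which pipeline call arguments to exercise;
    # height and width are excluded because img2img infers them from the
    # input image.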
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 695 |
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Run the same prompt through the Stable Diffusion v1.1-v1.4 checkpoints so
    their outputs can be compared side by side.
    """

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
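

# Hedged usage sketch (illustration only): community pipelines like this one
# are typically loaded through `custom_pipeline`.
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe("a photo of an astronaut").images  # one image per checkpoint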
| 587 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
SCREAMING_SNAKE_CASE_ : Any = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE_ : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 274 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ : List[Any] = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : str = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 274 | 1 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring in O(n).

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
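

# Note (added comment): inserting "|" between characters makes every palindrome
# in the transformed string odd-length, so odd- and even-length palindromes of
# the original string are handled by a single center-expansion rule.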
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)

        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])

        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # note: the depth slice is taken from the rgb tensor here, matching the
        # recorded expected values (every third value is shared with rgb_slice)
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 122 | 0 |
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper for timm models so they can be used as backbones with the same API
    as the other models in the library.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def _from_config(cls, config, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=config.backbone,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
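

# Hedged usage sketch (illustration only; the exact config fields are
# assumptions based on the code above):
#
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per stage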
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
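
# Note (added comment): this file only declares an import structure; the
# `_LazyModule` installed into `sys.modules` at the bottom defers the heavy
# torch/TensorFlow imports until an attribute is first accessed.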
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400 | 0 |
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Apply Tesseract OCR on a document image and return the recognized words plus normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
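

# Note (added comment): LayoutLM-style models expect bounding boxes on a fixed
# 0-1000 scale regardless of the source image size, which is exactly what
# normalize_box produces above.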
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Optional[int] = (size['height'], size['width'])
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(lowercase__ )
SCREAMING_SNAKE_CASE : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Any = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Any = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : List[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE : Any = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE : List[Any] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Union[str, Any] = [to_numpy_array(lowercase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : str = []
for image in images:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = apply_tesseract(lowercase__ , lowercase__ , lowercase__ )
words_batch.append(lowercase__ )
boxes_batch.append(lowercase__ )
if do_resize:
SCREAMING_SNAKE_CASE : int = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[str] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
SCREAMING_SNAKE_CASE : List[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase__ )
if apply_ocr:
SCREAMING_SNAKE_CASE : Optional[int] = words_batch
SCREAMING_SNAKE_CASE : str = boxes_batch
return data
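

# --- Example usage (added; not part of the original module) ---------------------
# A minimal, hedged sketch of running the processor above end to end. It assumes
# Pillow and pytesseract are installed and that a file named "document.png" exists;
# both names are placeholders.
if __name__ == "__main__":
    from PIL import Image

    example_image = Image.open("document.png").convert("RGB")
    example_processor = LayoutLMv3ImageProcessor(apply_ocr=True)
    example_encoding = example_processor(example_image, return_tensors="np")
    # pixel_values is (batch, channels, height, width); words/boxes come from Tesseract OCR
    print(example_encoding["pixel_values"].shape)
    print(example_encoding["words"][0][:5], example_encoding["boxes"][0][:5])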
| 251 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            # NOTE: this branch expects a `latents_reference` tensor to be available
            # alongside `latents`, mirroring the reference implementation.
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
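

# --- Example usage (added; not part of the original file) ------------------------
# A hedged sketch of how one might load this community pipeline. The checkpoint and
# the `custom_pipeline` name are assumptions; substitute whatever is appropriate.
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="seed_resize_stable_diffusion",
    )
    generator = torch.Generator("cpu").manual_seed(0)
    image_small = pipe("an astronaut riding a horse", height=512, width=512, generator=generator).images[0]
    generator = torch.Generator("cpu").manual_seed(0)
    image_wide = pipe("an astronaut riding a horse", height=512, width=768, generator=generator).images[0]
    # With the same seed, both images should share their global composition despite
    # the different sizes, which is the point of the reference-latent copy above.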
| 251 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 0 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__magic_name__ : Any = {
"""n_samples""": 6_4,
"""horizon""": 3_2,
"""num_inference_steps""": 2_0,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
__magic_name__ : Any = """hopper-medium-v2"""
__magic_name__ : Optional[Any] = gym.make(env_name)
__magic_name__ : List[Any] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
__magic_name__ : List[Any] = env.reset()
__magic_name__ : Optional[Any] = 0
__magic_name__ : Optional[int] = 0
__magic_name__ : List[str] = 1_0_0_0
__magic_name__ : str = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__magic_name__ : str = pipeline(obs, planning_horizon=3_2)
# execute action in environment
__magic_name__ : List[str] = env.step(denorm_actions)
__magic_name__ : Union[str, Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
__magic_name__ : Tuple = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 281 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
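
# Hedged usage sketch (added): directories laid out as `data/<label>/<file>.wav` can
# typically be loaded through this builder with the generic "audiofolder" loader of
# the `datasets` library (paths below are placeholders):
#
#   from datasets import load_dataset
#   dataset = load_dataset("audiofolder", data_dir="data")
#   print(dataset["train"][0]["audio"]["sampling_rate"], dataset["train"][0]["label"])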
| 12 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus

sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
    )
| 94 |
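
# Example invocation of the conversion script above (hedged; the script filename and
# file paths are placeholders, and only flags actually defined by the parser are used):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch \
#       --transfo_xl_dataset_file ./pretrained-corpus.pkl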
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 94 | 1 |
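# Hedged usage sketch for the classification tool in the previous snippet (the
# checkpoint is the tool's own default and is downloaded on first use; the call
# convention is an assumption based on how `PipelineTool` forwards arguments):
#
#   classifier = TextClassificationTool()
#   label = classifier("This movie was fantastic!", labels=["positive", "negative"])
#   print(label)  # expected: "positive"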
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 148 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 667 | 0 |
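# Hedged usage sketch for the processor in the previous snippet (assumes a local
# "document.png" and the public "microsoft/layoutlmv3-base" checkpoint):
#
#   from PIL import Image
#   from transformers import LayoutLMv3Processor
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values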
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
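

# Hedged inference sketch mirroring the integration tests above (the checkpoint name
# is taken from the tests; running this downloads weights and requires vision deps):
#
#   from PIL import Image
#   from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#   model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted_class])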
| 90 | 0 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
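

# Hedged usage sketch (added): decoding a local file to a 16 kHz mono float32
# waveform ("sample.flac" is a placeholder; ffmpeg must be on the PATH):
#
#   with open("sample.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#   print(audio.shape, audio.dtype)  # (num_samples,) float32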
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Helper function to stream raw bytes from the microphone through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Reads raw bytes from an iterator and yields chunks of length `chunk_len`, overlapping by `stride` bytes."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
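

# Worked example of the striding logic above (added for clarity). With chunk_len=6
# and stride=(2, 2), neighbouring chunks overlap, and the yielded `stride` tuple
# tells the consumer how many leading/trailing bytes of each chunk are overlap:
#
#   list(chunk_bytes_iter(iter([b"abcdefgh"]), 6, stride=(2, 2)))
#   # -> [{"raw": b"abcdef", "stride": (0, 2)},
#   #     {"raw": b"cdefgh", "stride": (2, 2)},
#   #     {"raw": b"efgh", "stride": (2, 0)}]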
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 581 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = np.random.randn(3 , 4 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(3 , 4 )
__a : str = tf.constant(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(3 , 4 )
__a : Optional[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.asarray(expand_dims(_lowercase , axis=1 ) ) ) )
| 581 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( __lowercase):
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''gpt_neox'''
def __init__( self , _UpperCamelCase=5_04_32 , _UpperCamelCase=61_44 , _UpperCamelCase=44 , _UpperCamelCase=64 , _UpperCamelCase=2_45_76 , _UpperCamelCase="gelu" , _UpperCamelCase=0.25 , _UpperCamelCase=1_00_00 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=20_48 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-5 , _UpperCamelCase=True , _UpperCamelCase=0 , _UpperCamelCase=2 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = rotary_pct
lowerCAmelCase__ = rotary_emb_base
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = classifier_dropout
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = tie_word_embeddings
lowerCAmelCase__ = use_parallel_residual
lowerCAmelCase__ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"got {self.rope_scaling}" )
lowerCAmelCase__ = self.rope_scaling.get('type' , _UpperCamelCase )
lowerCAmelCase__ = self.rope_scaling.get('factor' , _UpperCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_UpperCamelCase , _UpperCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 704 |
def _UpperCamelCase ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
lowerCAmelCase__ = 0
lowerCAmelCase__ = str(UpperCamelCase_ )
while len(UpperCamelCase_ ) != 1:
lowerCAmelCase__ = [int(UpperCamelCase_ ) for i in num_string]
lowerCAmelCase__ = 1
for i in range(0 , len(UpperCamelCase_ ) ):
total *= numbers[i]
lowerCAmelCase__ = str(UpperCamelCase_ )
steps += 1
return steps
def _UpperCamelCase ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
lowerCAmelCase__ = 0
lowerCAmelCase__ = str(UpperCamelCase_ )
while len(UpperCamelCase_ ) != 1:
lowerCAmelCase__ = [int(UpperCamelCase_ ) for i in num_string]
lowerCAmelCase__ = 0
for i in range(0 , len(UpperCamelCase_ ) ):
total += numbers[i]
lowerCAmelCase__ = str(UpperCamelCase_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 | 0 |
from itertools import permutations
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> bool:
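    # Project Euler 43: each 3-digit window of the 0-9 pandigital number must be divisible by 2, 3, 5, 7, 11, 13, 17 in turn.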
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
a__ : Union[str, Any] = [7, 11, 13, 17]
for i, test in enumerate(__UpperCamelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def SCREAMING_SNAKE_CASE( __UpperCamelCase = 10 ) -> int:
return sum(
int("".join(map(__UpperCamelCase , __UpperCamelCase ) ) )
for num in permutations(range(__UpperCamelCase ) )
if is_substring_divisible(__UpperCamelCase ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 191 |
import operator as op
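# Constants below name checkpoint artifact files and enumerate distributed-training options
# (FSDP sharding/prefetch modes, launchers, dynamo backends), plus a string-to-operator mapping.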
lowerCamelCase = """scaler.pt"""
lowerCamelCase = """pytorch_model"""
lowerCamelCase = """random_states"""
lowerCamelCase = """optimizer"""
lowerCamelCase = """scheduler"""
lowerCamelCase = """pytorch_model.bin"""
lowerCamelCase = """pytorch_model.bin.index.json"""
lowerCamelCase = """model.safetensors"""
lowerCamelCase = """model.safetensors.index.json"""
lowerCamelCase = """1.10.2"""
lowerCamelCase = """py38"""
lowerCamelCase = """4.17.0"""
lowerCamelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCamelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCamelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCamelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCamelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCamelCase = """2.0.1"""
lowerCamelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCamelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCamelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCamelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCamelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 191 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "blip_2_vision_model"
def __init__( self : int , _UpperCAmelCase : Dict=14_08 , _UpperCAmelCase : Union[str, Any]=61_44 , _UpperCAmelCase : List[Any]=39 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=2_24 , _UpperCAmelCase : int=14 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : str=0.00_001 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Any=1e-1_0 , _UpperCAmelCase : Dict=True , **_UpperCAmelCase : int , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = qkv_bias
@classmethod
def a__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "blip_2_qformer"
def __init__( self : Dict , _UpperCAmelCase : Optional[int]=3_05_22 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : int=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Any=5_12 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-1_2 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Any=2 , _UpperCAmelCase : str=14_08 , **_UpperCAmelCase : Dict , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = cross_attention_frequency
__lowercase = encoder_hidden_size
@classmethod
def a__ ( cls : Optional[int] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__lowercase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "blip-2"
lowerCAmelCase__ : int = True
def __init__( self : int , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[int]=32 , **_UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__lowercase = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
__lowercase = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
__lowercase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
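        # Build the three sub-configs; the text model type defaults to OPT when none is given.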
__lowercase = BlipaVisionConfig(**_UpperCAmelCase )
__lowercase = BlipaQFormerConfig(**_UpperCAmelCase )
__lowercase = text_config['model_type'] if 'model_type' in text_config else 'opt'
__lowercase = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__lowercase = self.text_config.tie_word_embeddings
__lowercase = self.text_config.is_encoder_decoder
__lowercase = num_query_tokens
__lowercase = self.vision_config.hidden_size
__lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowercase = 1.0
__lowercase = 0.02
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : BlipaVisionConfig , _UpperCAmelCase : BlipaQFormerConfig , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.qformer_config.to_dict()
__lowercase = self.text_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the character vocabulary had n_vocab=80; in v3 the '+' was dropped, leaving n_vocab=79 characters.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        # dict() accepts at most one positional mapping, so merge the three vocabularies explicitly.
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 688 | 0 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 185 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __a ( A__ ):
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "num_attention_heads" ) )
class __a :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[Any]=64 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : List[Any]=16 , SCREAMING_SNAKE_CASE : Tuple=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE : Tuple=[4, 6, 8] , SCREAMING_SNAKE_CASE : Dict=[2, 3, 4] , SCREAMING_SNAKE_CASE : Any=[16, 16, 16] , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE : int=[2, 2, 2] , SCREAMING_SNAKE_CASE : str=0.0_2 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : int=2 , ):
'''simple docstring'''
UpperCamelCase__ : Dict = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : str = image_size
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : str = kernel_size
UpperCamelCase__ : str = stride
UpperCamelCase__ : int = padding
UpperCamelCase__ : int = hidden_sizes
UpperCamelCase__ : Dict = num_attention_heads
UpperCamelCase__ : int = depths
UpperCamelCase__ : Optional[Any] = key_dim
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : List[str] = patch_size
UpperCamelCase__ : str = attention_ratio
UpperCamelCase__ : int = mlp_ratio
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Union[str, Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ : str = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : List[str] = num_labels
UpperCamelCase__ : int = initializer_range
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = LevitModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
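        # The patch embedding downsamples four times (stride-2 convolutions in the default config), so mirror that here.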
for _ in range(4 ):
UpperCamelCase__ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.num_labels
UpperCamelCase__ : Optional[Any] = LevitForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = config_and_inputs
UpperCamelCase__ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __a ( A__ , A__ , unittest.TestCase ):
_lowerCAmelCase : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_lowerCAmelCase : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = False
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = LevitModelTester(self )
UpperCamelCase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase__ : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : int = outputs.hidden_states
UpperCamelCase__ : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ : Union[str, Any] = False
UpperCamelCase__ : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
UpperCamelCase__ : Optional[int] = problem_type["title"]
UpperCamelCase__ : Tuple = problem_type["num_labels"]
UpperCamelCase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
UpperCamelCase__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCamelCase__ : Tuple = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as warning_list:
UpperCamelCase__ : Any = model(**SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] = LevitModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
UpperCamelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 228 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __UpperCAmelCase ( _UpperCamelCase ):
__lowerCamelCase : str = "roc_bert"
def __init__( self : Optional[Any] , a_ : Union[str, Any]=3_05_22 , a_ : int=7_68 , a_ : int=12 , a_ : Optional[Any]=12 , a_ : Optional[int]=30_72 , a_ : List[str]="gelu" , a_ : int=0.1 , a_ : Dict=0.1 , a_ : List[str]=5_12 , a_ : int=2 , a_ : Any=0.02 , a_ : Any=1E-12 , a_ : Optional[Any]=True , a_ : Dict=0 , a_ : int="absolute" , a_ : Tuple=None , a_ : str=True , a_ : Any=True , a_ : Union[str, Any]=7_68 , a_ : List[str]=9_10 , a_ : Optional[Any]=5_12 , a_ : Optional[int]=2_48_58 , a_ : Tuple=True , **a_ : Dict , ) -> str:
'''simple docstring'''
a__ : Any = vocab_size
a__ : Any = max_position_embeddings
a__ : List[str] = hidden_size
a__ : Any = num_hidden_layers
a__ : List[Any] = num_attention_heads
a__ : str = intermediate_size
a__ : Dict = hidden_act
a__ : Optional[Any] = hidden_dropout_prob
a__ : List[Any] = attention_probs_dropout_prob
a__ : Any = initializer_range
a__ : List[Any] = type_vocab_size
a__ : List[Any] = layer_norm_eps
a__ : Tuple = use_cache
a__ : Dict = enable_pronunciation
a__ : Any = enable_shape
a__ : List[str] = pronunciation_embed_dim
a__ : int = pronunciation_vocab_size
a__ : int = shape_embed_dim
a__ : Tuple = shape_vocab_size
a__ : Any = concat_input
a__ : int = position_embedding_type
a__ : Dict = classifier_dropout
        super().__init__(pad_token_id=a_ , **a_ )
| 251 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
__UpperCAmelCase = {
'''facebook/m2m100_418M''': 1024,
}
# fmt: off
__UpperCAmelCase = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class __UpperCAmelCase ( _UpperCamelCase ):
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = ["input_ids", "attention_mask"]
__lowerCamelCase : List[int] = []
__lowerCamelCase : List[int] = []
def __init__( self : Any , a_ : Any , a_ : int , a_ : int=None , a_ : Union[str, Any]=None , a_ : Optional[Any]="<s>" , a_ : Tuple="</s>" , a_ : int="</s>" , a_ : Optional[int]="<pad>" , a_ : List[Any]="<unk>" , a_ : Tuple="m2m100" , a_ : Optional[Dict[str, Any]] = None , a_ : Optional[Any]=8 , **a_ : Union[str, Any] , ) -> None:
'''simple docstring'''
a__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
a__ : List[str] = language_codes
a__ : int = FAIRSEQ_LANGUAGE_CODES[language_codes]
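        # Each supported language gets a dedicated token of the form __xx__, appended after the regular vocabulary.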
a__ : Tuple = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
a__ : Optional[Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(a_ )
for lang_code in fairseq_language_code
if self.get_lang_token(a_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a_ , tgt_lang=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , language_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a_ , **a_ , )
a__ : List[str] = vocab_file
a__ : Optional[int] = load_json(a_ )
a__ : List[Any] = {v: k for k, v in self.encoder.items()}
a__ : List[Any] = spm_file
a__ : Any = load_spm(a_ , self.sp_model_kwargs )
a__ : Tuple = len(self.encoder )
a__ : Any = {
self.get_lang_token(a_ ): self.encoder_size + i for i, lang_code in enumerate(a_ )
}
a__ : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a_ )}
a__ : Any = {v: k for k, v in self.lang_token_to_id.items()}
a__ : Union[str, Any] = src_lang if src_lang is not None else "en"
a__ : Union[str, Any] = tgt_lang
a__ : List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
a__ : Optional[int] = num_madeup_words
@property
def UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCAmelCase ( self : List[Any] , a_ : str ) -> None:
'''simple docstring'''
a__ : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase ( self : Tuple , a_ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a_ , out_type=a_ )
def UpperCAmelCase ( self : List[Any] , a_ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a_ , self.encoder[self.unk_token] )
def UpperCAmelCase ( self : str , a_ : int ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a_ , self.unk_token )
def UpperCAmelCase ( self : Dict , a_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = []
a__ : Optional[int] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
a__ : List[str] = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
a__ : Any = [1] * len(self.prefix_tokens )
a__ : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase ( self : str ) -> Dict:
'''simple docstring'''
a__ : int = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Dict:
'''simple docstring'''
a__ : Tuple = self.__dict__.copy()
a__ : Optional[int] = None
return state
def __setstate__( self : List[str] , a_ : Dict ) -> None:
'''simple docstring'''
a__ : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : List[Any] = {}
a__ : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCAmelCase ( self : List[Any] , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : Dict = Path(a_ )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
a__ : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
a__ : Tuple = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a_ )
elif not os.path.isfile(self.spm_file ):
with open(a_ , "wb" ) as fi:
a__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (str(a_ ), str(a_ ))
def UpperCAmelCase ( self : Any , a_ : List[str] , a_ : str = "en" , a_ : Optional[List[str]] = None , a_ : str = "ro" , **a_ : Dict , ) -> BatchEncoding:
'''simple docstring'''
a__ : str = src_lang
a__ : Any = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def UpperCAmelCase ( self : Optional[Any] , a_ : Dict , a_ : Optional[str] , a_ : Optional[str] , **a_ : Tuple ) -> str:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a__ : List[Any] = src_lang
a__ : Optional[int] = self(a_ , add_special_tokens=a_ , **a_ )
a__ : Any = self.get_lang_id(a_ )
a__ : int = tgt_lang_id
return inputs
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self : Union[str, Any] , a_ : str ) -> None:
'''simple docstring'''
a__ : Optional[int] = self.get_lang_token(a_ )
a__ : Tuple = self.lang_token_to_id[lang_token]
a__ : List[str] = [self.cur_lang_id]
a__ : Optional[int] = [self.eos_token_id]
def UpperCAmelCase ( self : List[str] , a_ : str ) -> None:
'''simple docstring'''
a__ : Optional[int] = self.get_lang_token(a_ )
a__ : int = self.lang_token_to_id[lang_token]
a__ : Tuple = [self.cur_lang_id]
a__ : Optional[int] = [self.eos_token_id]
def UpperCAmelCase ( self : Any , a_ : str ) -> str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def UpperCAmelCase ( self : List[str] , a_ : str ) -> int:
'''simple docstring'''
a__ : List[str] = self.get_lang_token(a_ )
return self.lang_token_to_id[lang_token]
def lowercase__ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
a__ : Any = sentencepiece.SentencePieceProcessor(**lowerCAmelCase__ )
spm.Load(str(lowerCAmelCase__ ) )
return spm
def lowercase__ ( lowerCAmelCase__ : str ) -> Union[Dict, List]:
'''simple docstring'''
with open(lowerCAmelCase__ , "r" ) as f:
return json.load(lowerCAmelCase__ )
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
with open(lowerCAmelCase__ , "w" ) as f:
        json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=2 )
| 251 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any model that supports video classification."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # sample `num_frames` evenly spaced frame indices
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 684 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            # wait until the statement is complete (closing parenthesis reached)
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
__lowercase : Union[str, Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename) | 564 | 0 |
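# The --correct_filename input is one record per line, semicolon-separated as
# file;class;test;replacement-line (the contents below are made up for illustration):
#
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])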
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Return a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given list and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order (via recursion)."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 702 | from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to a string representation in the given base (2..36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 map to letters A..Z
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 591 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
| 488 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
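# User code can mirror the optional-dependency guard above to degrade gracefully.
# A minimal sketch (the checkpoint name is illustrative):
#
#   try:
#       from diffusers import VersatileDiffusionPipeline
#       pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#   except ImportError:
#       pipe = None  # surface an install hint instead of crashing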
| 112 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take only the fraction that still fits
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 |
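# Worked example for fractional_knapsack (toy numbers, not from the original file):
# with value = [1, 3, 5, 7, 9], weight = [0.9, 0.7, 0.5, 0.3, 0.1] and capacity = 5,
# every item fits whole, so it returns (25, [1, 1, 1, 1, 1]).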
from pathlib import Path
import cv2 as cva  # OpenCV, aliased to match the identifier used below
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp the image with the affine transform that maps the points in pt1 onto pt2."""
    rotation_matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 187 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 440 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 440 | 1 |
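# Minimal usage sketch for the processor above (checkpoint name is illustrative):
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")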
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 64 |
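# Typical entry point that exercises the builder above (paths are illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv", "test": "test.csv"})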
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 text decoder with a learned prefix, used to generate text from embedded features."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate captions given CLIP features, one beam search per feature."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Length-normalized beam search over the GPT-2 decoder."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # mask finished beams so they cannot extend with a better score
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 64 | 1 |
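# Illustrative call into the decoder above inside a captioning loop (dimensions made up):
#
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
#   tokens, lengths = decoder.generate_captions(clip_features, eos_token_id=50256, device="cuda")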
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """Initialize the distributed retrieval process group and the index on the main worker."""
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 264 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 264 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" ,_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str] ) -> int:
_a : Optional[Any] =os.path.join(_UpperCAmelCase ,"""test_file.py""" )
with open(_UpperCAmelCase ,"""w""" ) as _tmp_file:
_tmp_file.write(_UpperCAmelCase )
_a : Union[str, Any] =get_imports(_UpperCAmelCase )
assert parsed_imports == ["os"]
| 506 |
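# For reference, get_imports returns the top-level modules a file imports; e.g. a file
# containing "import numpy as np" and "from torch import nn" would yield ["numpy", "torch"]
# (illustrative; the exact filtering lives in transformers.dynamic_module_utils).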
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Check whether all elements of the iterable are equal."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
| 506 | 1 |
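# Known sanity values for this Project Euler problem (the n=4 answer is the published
# one; treat the smaller cases as easily verified by hand):
#   solution(2) == 14, solution(3) == 644, solution(4) == 134043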
def solution(n: int = 1000) -> int:
    """Returns the sum of all multiples of 3 or 5 below `n` (starting from 3)."""
    a = 3
    result = 0
    while a < n:
        # multiples of 15 already satisfy this condition, so each is counted once
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 147 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 147 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    # NOTE: the concrete model this processor belongs to is not recoverable from this
    # excerpt, so a generic class name is used here (assumption).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 710 |
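# Minimal usage sketch (class name per the assumption noted above):
#
#   from PIL import Image
#   processor = ImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # expected (1, 3, 224, 224) with the defaults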
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Output of a scheduler's step function; `prev_sample` is the sample for the previous timestep."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Mixin containing common scheduler functionality: config load/save and compatibility lookup."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 373 | 0 |
def binary_insertion_sort(collection: list) -> list:
    """Insertion sort that locates each insertion point with binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to make room, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
A_ : int = input('Enter numbers separated by a comma:\n').strip()
A_ : Union[str, Any] = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 456 |
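# Example: binary_insertion_sort([5, 2, 4, 1]) returns [1, 2, 4, 5]; the binary search
# finds each insertion point in O(log i) and the shift loop keeps the prefix sorted.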
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All tours obtained by swapping two interior nodes, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 456 | 1 |
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
| 576 | import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 576 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 652 |
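# Typical invocation (prints environment info to paste into bug reports):
#
#   $ accelerate env
#   # or, since the module has a __main__ guard: python -m accelerate.commands.env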
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[int] = '''swin2sr'''
_UpperCAmelCase : Any = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=1_8_0 ,SCREAMING_SNAKE_CASE__ : Any=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : int=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : int="1conv" ,SCREAMING_SNAKE_CASE__ : Optional[int]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = image_size
__lowerCamelCase : str = patch_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : Dict = embed_dim
__lowerCamelCase : Dict = depths
__lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = num_heads
__lowerCamelCase : Tuple = window_size
__lowerCamelCase : Dict = mlp_ratio
__lowerCamelCase : str = qkv_bias
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = drop_path_rate
__lowerCamelCase : Optional[int] = hidden_act
__lowerCamelCase : Dict = use_absolute_embeddings
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : str = initializer_range
__lowerCamelCase : List[Any] = upscale
__lowerCamelCase : List[Any] = img_range
__lowerCamelCase : List[str] = resi_connection
__lowerCamelCase : Union[str, Any] = upsampler
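
# Usage sketch (added; assumes an installed `transformers` release that exposes
# Swin2SRConfig at top level). The attribute_map above aliases the generic
# names onto Swin2SR-specific ones:
if __name__ == "__main__":
    from transformers import Swin2SRConfig as _PublicSwin2SRConfig

    cfg = _PublicSwin2SRConfig()
    assert cfg.hidden_size == cfg.embed_dim == 180
    assert cfg.num_hidden_layers == cfg.num_layers == 6  # num_layers = len(depths)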
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Store an optional default key (falls back to 1 when unset)."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR every character of `content` with `key`; returns a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of `encrypt`; XOR-ing with the same key restores the input."""
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR every character of `content` with `key`; returns a string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of `encrypt_string`."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt `file` into `encrypt.out`; returns False on I/O errors."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt `file` into `decrypt.out`; returns False on I/O errors."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
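
# Round-trip demo (added for illustration; uses only the class defined above):
if __name__ == "__main__":
    cipher = XORCipher()
    ciphertext = cipher.encrypt_string("hallo welt", 67)
    # XOR-ing twice with the same key restores the original text.
    assert cipher.decrypt_string(ciphertext, 67) == "hallo welt"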
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail, keeping tail values minimal
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
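
# Worked example (added for illustration): for [2, 5, 3, 7, 11, 8, 10, 13, 6]
# the `tail` array evolves [2] -> [2,5] -> [2,3] -> [2,3,7] -> [2,3,7,11]
# -> [2,3,7,8] -> [2,3,7,8,10] -> [2,3,7,8,10,13], giving length 6.
if __name__ == "__main__":
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6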
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : str ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCamelCase( self : str ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCamelCase( self : Dict ):
a__ : Dict = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
a__ : List[Any] = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Optional[Any] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
a__ : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
a__ : str = model(**lowerCamelCase__ )
a__ : str = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
a__ : List[str] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
a__ : Tuple = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _UpperCamelCase( self : List[Any] ):
a__ : str = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
a__ : List[Any] = self.default_image_processor
a__ : str = prepare_img()
a__ : Union[str, Any] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
a__ : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
# masks_queries_logits
a__ : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
a__ : Optional[int] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
a__ : int = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
a__ : Any = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
a__ : List[str] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _UpperCamelCase( self : str ):
a__ : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
a__ : List[Any] = self.default_image_processor
a__ : Union[str, Any] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
a__ : Any = inputs["pixel_values"].to(lowerCamelCase__ )
a__ : List[Any] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
a__ : List[Any] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio of the velocity to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor for the given velocity."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
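
# Worked example (added for illustration): the Lorentz factor at 90% of light
# speed is gamma = 1 / sqrt(1 - 0.9**2) ~= 2.294, i.e. moving clocks tick
# roughly 2.3x slower.
if __name__ == "__main__":
    print(gamma(0.9 * c))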
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
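
# Usage sketch (added; assumes an installed `transformers` release that exposes
# PegasusConfig at top level). Both the attribute_map and the properties above
# alias the generic names onto Pegasus-specific ones:
if __name__ == "__main__":
    from transformers import PegasusConfig as _PublicPegasusConfig

    cfg = _PublicPegasusConfig()
    assert cfg.num_attention_heads == cfg.encoder_attention_heads == 16
    assert cfg.hidden_size == cfg.d_model == 1024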
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when every element of `iterable` is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first `n` consecutive integers with `n` unique prime factors each."""
    base = 2

    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first such group (Project Euler 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
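
# Worked example (added for illustration): 14 = 2 * 7 and 15 = 3 * 5 are the
# first two consecutive integers with two distinct prime factors each, and
# 644 = 2**2 * 7 * 23.
if __name__ == "__main__":
    assert unique_prime_factors(644) == {2, 7, 23}
    assert solution(2) == 14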
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
'''simple docstring'''
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__)
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = pipe(**self.get_dummy_inputs(UpperCAmelCase__))
A__ = output.images[0]
A__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A__ = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
'''simple docstring'''
A__ = torch_device == '''cpu'''
A__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCAmelCase__ , relax_max_difference=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__)
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = 1
A__ = 2
A__ = self.get_dummy_inputs(UpperCAmelCase__)
for key in inputs.keys():
if key in self.batch_params:
A__ = batch_size * [inputs[key]]
A__ = pipe(**UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''')
A__ = ShapEPipeline.from_pretrained('''openai/shap-e''')
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(0)
A__ = pipe(
'''a shark''' , generator=UpperCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std" ) )
def _SCREAMING_SNAKE_CASE ( self : int )-> List[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Any:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Dict:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> List[str]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : str )-> str:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
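
# Worked example (added for illustration): for the 1000-digit number above the
# greatest product of thirteen adjacent digits is 23514624000, the documented
# answer to Project Euler problem 8.
if __name__ == "__main__":
    assert solution() == 23514624000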
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
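
# Usage sketch (added; assumes an installed `transformers` release that exposes
# DeiTConfig at top level):
if __name__ == "__main__":
    from transformers import DeiTConfig as _PublicDeiTConfig

    cfg = _PublicDeiTConfig(image_size=384)
    # (384 / 16) ** 2 = 576 patches feed the encoder at this resolution.
    assert (cfg.image_size // cfg.patch_size) ** 2 == 576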
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number num! (Project Euler 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
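
# Worked example (added for illustration): 10! = 3,628,800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.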
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
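
# Worked example (added for illustration): every line is padded to exactly 16
# characters, extra spaces are distributed left-first, and the final line is
# left-justified.
if __name__ == "__main__":
    assert text_justification("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]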
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
super().__init__(**_A )
_UpperCamelCase = config.num_channels
_UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
_UpperCamelCase = shape_list(_A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
_UpperCamelCase = self.embedder(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
return self.normalization(self.convolution(_A ) , training=_A )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
_UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCamelCase = self.pooler(_A )
for layer_module in self.attention:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
]
        self.activation = ACT2FN[config.hidden_act]
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
]
        self.activation = ACT2FN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
super().__init__(**_A )
_UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
*[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
super().__init__(**_A )
_UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(_A )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
UpperCAmelCase = RegNetConfig
def __init__( self : int , _A : Tuple , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = config
_UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
_UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
@unpack_inputs
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(_A , training=_A )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(_A )
# Change to NCHW output format have uniformity in the modules
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
@property
def UpperCamelCase_ ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None, output_hidden_states: bool = None, return_dict: bool = None, training: bool = False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
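# --- Illustrative usage sketch (added; assumes the public transformers API and
# the "facebook/regnet-y-040" checkpoint, neither of which is defined here) ---
def _regnet_classification_demo():
    from transformers import TFRegNetForImageClassification

    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, as documented above
    logits = model(pixel_values).logits
    return int(tf.argmax(logits, axis=-1)[0])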
| 71 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,snake_case__=[0.5, 0.5, 0.5] ,snake_case__=[0.5, 0.5, 0.5] ,snake_case__=True ,snake_case__=1 / 255 ,snake_case__=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Tuple = min_resolution
SCREAMING_SNAKE_CASE_ : Any = max_resolution
SCREAMING_SNAKE_CASE_ : str = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : Tuple = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_std
SCREAMING_SNAKE_CASE_ : Tuple = do_rescale
SCREAMING_SNAKE_CASE_ : Any = rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_pad
def snake_case ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case ( self ,snake_case__ ,snake_case__=False ):
if not batched:
SCREAMING_SNAKE_CASE_ : List[str] = image_inputs[0]
if isinstance(snake_case__ ,Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : Dict = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE_ : str = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : Optional[int] = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE_ : int = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : List[Any] = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(snake_case__ ,key=lambda snake_case__ : item[0] )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = max(snake_case__ ,key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = DetaImageProcessor if is_vision_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'image_mean' ) )
self.assertTrue(hasattr(snake_case__ ,'image_std' ) )
self.assertTrue(hasattr(snake_case__ ,'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_rescale' ) )
self.assertTrue(hasattr(snake_case__ ,'do_pad' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,snake_case__ )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def snake_case ( self ):
# prepare image and target
SCREAMING_SNAKE_CASE_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Any = {'image_id': 39769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE_ : int = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : int = image_processing(images=snake_case__ ,annotations=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
@slow
def snake_case ( self ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
SCREAMING_SNAKE_CASE_ : List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE_ : Any = DetaImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(images=snake_case__ ,annotations=snake_case__ ,masks_path=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify masks
SCREAMING_SNAKE_CASE_ : Any = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,snake_case__ )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
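# --- Illustrative sketch (added): the slow tests above boil down to this
# round trip; the fixture paths are the ones already used in this file.
def _deta_coco_demo():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
        target = {"image_id": 39769, "annotations": json.loads(f.read())}
    processor = DetaImageProcessor()
    encoding = processor(images=image, annotations=target, return_tensors="pt")
    # shortest edge is resized to 800, longest capped at 1333 -> (1, 3, 800, 1066)
    return encoding["pixel_values"].shape, encoding["labels"][0]["boxes"].shape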
| 105 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(dataset))}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis )
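# --- Illustrative usage sketch (added; the toy datasets are made up) ---
def _combine_demo():
    from .arrow_dataset import Dataset

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
    stacked = concatenate_datasets([d1, d2])
    assert len(stacked) == 6
    return mixed, stacked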
| 105 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = parent
__SCREAMING_SNAKE_CASE: int = batch_size
__SCREAMING_SNAKE_CASE: Tuple = num_channels
__SCREAMING_SNAKE_CASE: Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE: int = min_resolution
__SCREAMING_SNAKE_CASE: Optional[Any] = max_resolution
__SCREAMING_SNAKE_CASE: int = do_resize
__SCREAMING_SNAKE_CASE: int = size if size is not None else {'''height''': 18, '''width''': 20}
__SCREAMING_SNAKE_CASE: Tuple = do_thumbnail
__SCREAMING_SNAKE_CASE: List[Any] = do_align_axis
__SCREAMING_SNAKE_CASE: Tuple = do_pad
__SCREAMING_SNAKE_CASE: List[str] = do_normalize
__SCREAMING_SNAKE_CASE: int = image_mean
__SCREAMING_SNAKE_CASE: Tuple = image_std
def snake_case_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a ( __lowercase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : int = DonutImageProcessor if is_vision_available() else None
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = DonutImageProcessingTester(self )
@property
def snake_case_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__SCREAMING_SNAKE_CASE: Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def snake_case_ ( self ):
"""simple docstring"""
pass
@is_flaky()
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE: Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE: Optional[Any] = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE: List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE: Optional[Any] = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE: List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE: List[str] = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
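# --- Illustrative sketch (added): Donut's legacy `size` tuples are in
# (width, height) order, so a tuple is flipped into the height/width dict
# that the config test above exercises.
def _donut_size_demo():
    proc = DonutImageProcessor(size=(42, 84))  # legacy (width, height)
    assert proc.size == {"height": 84, "width": 42}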
| 146 |
from manim import *
class a ( __lowercase ):
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = Rectangle(height=0.5 , width=0.5 )
__SCREAMING_SNAKE_CASE: Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__SCREAMING_SNAKE_CASE: Tuple = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: Optional[int] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: Optional[int] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Optional[int] = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: List[str] = Text('''CPU''' , font_size=24 )
__SCREAMING_SNAKE_CASE: List[str] = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = [mem.copy() for i in range(4 )]
__SCREAMING_SNAKE_CASE: Tuple = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = Text('''GPU''' , font_size=24 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: str = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Any = Text('''Model''' , font_size=24 )
__SCREAMING_SNAKE_CASE: List[str] = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = []
for i, rect in enumerate(_lowerCAmelCase ):
rect.set_stroke(_lowerCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__SCREAMING_SNAKE_CASE: int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_lowerCAmelCase , buff=0.0 )
self.add(_lowerCAmelCase )
cpu_targs.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: Union[str, Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: str = Text('''Loaded Checkpoint''' , font_size=24 )
__SCREAMING_SNAKE_CASE: Dict = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , aligned_edge=_lowerCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__SCREAMING_SNAKE_CASE: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__SCREAMING_SNAKE_CASE: Optional[int] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__SCREAMING_SNAKE_CASE: Optional[int] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ) , Write(_lowerCAmelCase ) )
self.play(Write(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) )
__SCREAMING_SNAKE_CASE: List[str] = []
__SCREAMING_SNAKE_CASE: Tuple = []
for i, rect in enumerate(_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: int = fill.copy().set_fill(_lowerCAmelCase , opacity=0.7 )
target.move_to(_lowerCAmelCase )
first_animations.append(GrowFromCenter(_lowerCAmelCase , run_time=1 ) )
__SCREAMING_SNAKE_CASE: List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
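# --- Usage note (added): a Scene like the one above is typically rendered
# with the standard manim CLI, e.g.
#   manim -pql this_file.py <SceneName>
# where -p previews the output and -ql renders at low quality for fast
# iteration (the file name and scene name here are placeholders).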
| 146 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the simultaneous equations matrix * solution = vector with Gaussian
    elimination and partial pivoting, returning the solution column vector.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Return, as a callable, the polynomial of minimal degree that passes
    through the points (1, y_list[0]), (2, y_list[1]), ...
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size) )
    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimum polynomials fitted to
    the first 1..order terms of the sequence generated by `func`.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'{solution() = }')
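# --- Worked example (added): a quick sanity check of the Gaussian-elimination
# helper on 2x + y = 5, x - y = 1, whose solution is x = 2, y = 1. For the
# degree-10 generating function above, solution() yields the Project Euler
# problem 101 answer.
def _solve_demo():
    assert solve([[2, 1], [1, -1]], [[5], [1]]) == [[2.0], [1.0]]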
| 527 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests (class name restored as an assumption; the mixin base is the one imported above)
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 25 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
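# --- Illustrative note (added): _LazyModule defers the torch-dependent
# imports above until first attribute access; the dotted module path below is
# an assumption based on the four-dot relative imports (deprecated subpackage).
def _lazy_import_demo():
    import importlib

    mctct = importlib.import_module("transformers.models.deprecated.mctct")
    return mctct.MCTCTConfig  # resolved lazily on first access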
| 715 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.02 , a__=3 , a__=0.6 , a__=None , ):
A_ : int = parent
A_ : Optional[int] = batch_size
A_ : Any = image_size
A_ : Optional[int] = patch_size
A_ : int = num_channels
A_ : str = is_training
A_ : str = use_labels
A_ : str = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : Any = intermediate_size
A_ : List[Any] = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : str = type_sequence_label_size
A_ : int = initializer_range
A_ : List[Any] = mask_ratio
A_ : str = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ : Optional[Any] = (image_size // patch_size) ** 2
A_ : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowerCamelCase ( self ):
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : int = None
if self.use_labels:
A_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
A_ : Optional[int] = ViTMAEModel(config=a__ )
model.to(a__ )
model.eval()
A_ : int = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
A_ : int = ViTMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
A_ : Optional[Any] = model(a__ )
A_ : Dict = (self.image_size // self.patch_size) ** 2
A_ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ : Optional[int] = 1
A_ : Any = ViTMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[Any] = model(a__ )
A_ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowerCamelCase ( self ):
A_ : Optional[Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Any = config_and_inputs
A_ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
a = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
a = False
a = False
a = False
a = False
def _lowerCamelCase ( self ):
A_ : int = ViTMAEModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def _lowerCamelCase ( self ):
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = model_class(a__ )
A_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def _lowerCamelCase ( self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _lowerCamelCase ( self ):
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
# make masks reproducible
np.random.seed(2 )
A_ : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ : Optional[Any] = torch.from_numpy(a__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ : Any = pt_noise
super().check_pt_tf_models(a__ , a__ , a__ )
def _lowerCamelCase ( self ):
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(a__ )
model.to(a__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(a__ , a__ ) )
A_ : int = outputs[0].cpu().numpy()
A_ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
A_ : Union[str, Any] = model_class.from_pretrained(a__ )
model.to(a__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(a__ , a__ ) )
# Make sure we don't have nans
A_ : Optional[int] = after_outputs[0].cpu().numpy()
A_ : str = 0
A_ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCamelCase ( self ):
pass
@slow
def _lowerCamelCase ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = ViTMAEModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(a__ )
A_ : Optional[Any] = self.default_image_processor
A_ : Union[str, Any] = prepare_img()
A_ : Union[str, Any] = image_processor(images=a__ , return_tensors="""pt""" ).to(a__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ : Optional[int] = ViTMAEConfig()
A_ : Tuple = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ : Dict = model(**a__ , noise=torch.from_numpy(a__ ).to(device=a__ ) )
# verify the logits
A_ : Tuple = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , a__ )
A_ : List[str] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(a__ ) , atol=1E-4 ) )
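# --- Worked arithmetic (added): with the ViT-MAE base setup used above,
# image_size=224 and patch_size=16 give (224 // 16) ** 2 = 196 patches, and
# each patch decodes to 16 * 16 * 3 = 768 values, hence logits of shape
# (1, 196, 768) in the integration test.
def _vitmae_shape_demo():
    image_size, patch_size, num_channels = 224, 16, 3
    num_patches = (image_size // patch_size) ** 2
    assert num_patches == 196
    assert patch_size**2 * num_channels == 768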
| 481 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
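# --- Usage note (added; the script filename and output path are placeholders):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations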
| 11 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self):
        return 1e-4
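# --- Illustrative sketch (added): the ONNX config above advertises a single
# dynamic-axis input, which an exporter would consume roughly like this.
def _data2vec_vision_onnx_demo():
    config = Data2VecVisionConfig()
    onnx_config = Data2VecVisionOnnxConfig(config, task="default")
    assert list(onnx_config.inputs) == ["pixel_values"]
    assert onnx_config.atol_for_validation == 1e-4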
| 74 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
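# Minimal sketch of the round-trip behavior the tests above verify, using the
# public `datasets` API (Dataset.to_parquet / Dataset.from_parquet wrap the same
# ParquetDatasetWriter/Reader machinery; the helper name and file name below are
# illustrative):
def _parquet_round_trip_example(tmp_dir):
    from datasets import Dataset

    ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    # to_parquet returns a positive count on success, mirroring writer.write() > 0 above
    assert ds.to_parquet(f"{tmp_dir}/example.parquet") > 0
    reloaded = Dataset.from_parquet(f"{tmp_dir}/example.parquet")
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]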
| 3 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = (DDPMScheduler,)
def __UpperCamelCase ( self , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
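        # "fixed_small" uses the DDPM posterior variance
        # beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
        # (Ho et al. 2020, Eq. 7); it is 0 at t=0 and approaches beta_end=0.02
        # as t -> 999, which is what the three checks below assert.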
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(A_ )
UpperCamelCase = prev_t.item()
self.assertEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(A_ )
with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=A_ )
| 3 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
snake_case = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
snake_case = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
snake_case = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
return float((preds == labels).mean() )
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = simple_accuracy(__lowercase , __lowercase )
_lowerCAmelCase : str = float(fa_score(y_true=__lowercase , y_pred=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : str = float(pearsonr(__lowercase , __lowercase )[0] )
_lowerCAmelCase : str = float(spearmanr(__lowercase , __lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__A , __A )}
elif self.config_name == "stsb":
return pearson_and_spearman(__A , __A )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__A , __A )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__A , __A )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 424 |
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
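# Worked example of the identity used above (Fermat's little theorem):
# for prime p and gcd(b, p) = 1, b ** (p - 2) is the modular inverse of b.
# With p = 7, b = 3: 3 ** 5 = 243 = 34 * 7 + 5, so 3 ** 5 % 7 == 5, and
# 3 * 5 = 15 ≡ 1 (mod 7). Likewise binary_exponentiation(2, 10, 1000) == 24,
# since 2 ** 10 = 1024. Note the float division in `(a / b) % p` above: the
# comparison only holds because a is an exact multiple of b.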
| 399 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
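# _LazyModule defers the heavy torch/tokenizers imports until a name is first
# accessed. A minimal sketch of the same idea using PEP 562 module-level
# __getattr__ (illustrative only, not the actual _LazyModule implementation):
import importlib


def _lazy_getattr(name, import_structure, package):
    # Find the submodule that defines `name`, import it on demand, and return
    # the attribute; this mirrors what _LazyModule does under the hood.
    for module_name, exported_names in import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + module_name, package)
            return getattr(module, name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")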
| 715 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
    _UpperCAmelCase = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
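# Usage sketch for the compression filesystems above. fsspec's public
# `compression=` argument (which these classes build on) already decompresses
# transparently; the helper name below is illustrative:
import fsspec


def _read_gzipped_text(path: str) -> str:
    # e.g. path = "data/file.txt.gz"
    with fsspec.open(path, mode="rt", compression="gzip") as f:
        return f.read()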
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : int = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 184 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A__ ( __A : Any , __A : str , __A : str , __A : Path , __A : str = None , __A : str = None , __A : str = None , ) ->Optional[Any]:
if config_name_or_path is None:
__A ='''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
__A =generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__A =question_encoder_name_or_path
__A =RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
__A =RagConfig.from_pretrained(__A )
__A =AutoConfig.from_pretrained(__A )
__A =AutoConfig.from_pretrained(__A )
__A =gen_config
__A =question_encoder_config
__A =model_class.from_pretrained_question_encoder_generator(
__A , __A , config=__A )
rag_model.save_pretrained(__A )
# Sanity check.
model_class.from_pretrained(__A )
# Save tokenizers.
__A =AutoTokenizer.from_pretrained(__A )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
__A =AutoTokenizer.from_pretrained(__A )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
_lowerCamelCase : str = parser.parse_args()
_lowerCamelCase : Optional[int] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
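# Hypothetical invocation of the consolidation script above (model identifiers
# and destination path are illustrative, not prescribed by the script):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated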
| 184 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTTokenizer
lowerCAmelCase = OpenAIGPTTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = False
def __A ( self : Tuple ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCAmelCase = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowerCAmelCase = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Dict:
"""simple docstring"""
return "lower newer", "lower newer"
def __A ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase = "lower"
lowerCAmelCase = ["low", "er</w>"]
lowerCAmelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = tokens + ["<unk>"]
lowerCAmelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
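        # Why "lower" -> ["low", "er</w>"]: BPE greedily applies the merge rules
        # written in setUp ("l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>")
        # until no rule matches, leaving exactly these two subword tokens.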
def __A ( self : Dict , SCREAMING_SNAKE_CASE : str=1_5 ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowerCAmelCase = "This is a simple input"
lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase = ("This is a simple input", "This is a pair")
lowerCAmelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def __A ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
pass
| 159 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase : List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = ['pixel_values']
def __init__( self : Any , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_pad
lowerCAmelCase = pad_size
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : int , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = get_image_size(SCREAMING_SNAKE_CASE )
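        # Pad each spatial dimension up to the next multiple of `size` using
        # symmetric (mirror) padding, e.g. old_height=17, size=8 gives
        # (17 // 8 + 1) * 8 - 17 = 7 extra rows; note that a dimension already
        # at a multiple still gains a full block (16 -> 8 extra rows).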
lowerCAmelCase = (old_height // size + 1) * size - old_height
lowerCAmelCase = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def __A ( self : Any , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Optional[int] , ) -> Dict:
"""simple docstring"""
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_pad if do_pad is not None else self.do_pad
lowerCAmelCase = pad_size if pad_size is not None else self.pad_size
lowerCAmelCase = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowerCAmelCase = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 159 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
A__ : int = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A__ : Union[str, Any] = logging.getLogger()
def _a ( ):
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowerCAmelCase__ : List[str] = parser.parse_args()
return args.f
def _a ( __UpperCamelCase : int ,__UpperCamelCase : List[str]="eval" ):
lowerCAmelCase__ : Optional[int] = os.path.join(__UpperCamelCase ,f'''{split}_results.json''' )
if os.path.exists(__UpperCamelCase ):
with open(__UpperCamelCase ,'''r''' ) as f:
return json.load(__UpperCamelCase )
raise ValueError(f'''can\'t find {path}''' )
A__ : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( __UpperCamelCase ):
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : Optional[int] = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_flax_glue.main()
lowerCAmelCase__ : Dict = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : Optional[Any] = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_clm_flax.main()
lowerCAmelCase__ : Optional[Any] = get_results(SCREAMING_SNAKE_CASE__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : str = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_summarization_flax.main()
lowerCAmelCase__ : Union[str, Any] = get_results(SCREAMING_SNAKE_CASE__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : str = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_mlm_flax.main()
lowerCAmelCase__ : Tuple = get_results(SCREAMING_SNAKE_CASE__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : int = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_ta_mlm_flax.main()
lowerCAmelCase__ : List[Any] = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Dict = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : List[str] = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_flax_ner.main()
lowerCAmelCase__ : List[Any] = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : int = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
run_qa.main()
lowerCAmelCase__ : Any = get_results(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 233 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase ( __UpperCamelCase ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : List[str] = config_class
lowerCAmelCase__ : str = has_text_modality
lowerCAmelCase__ : str = kwargs
lowerCAmelCase__ : Optional[int] = common_properties
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Any = self.config_class(**self.inputs_dict )
lowerCAmelCase__ : List[str] = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(SCREAMING_SNAKE_CASE__ ):
try:
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
                    getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , msg=f'''`{name}` value {idx} expected, but was {getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(SCREAMING_SNAKE_CASE__ ):
try:
lowerCAmelCase__ : Dict = self.config_class(**{name: idx} )
self.parent.assertEqual(
                    getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , msg=f'''`{name}` value {idx} expected, but was {getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.config_class(**self.inputs_dict )
lowerCAmelCase__ : Tuple = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , '''config.json''' )
config_first.to_json_file(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Optional[int] = self.config_class.from_json_file(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : List[str] = self.config_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : str = self.config_class(**self.inputs_dict )
lowerCAmelCase__ : str = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
config_first.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Tuple = self.config_class.from_pretrained(SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
lowerCAmelCase__ : Optional[int] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def lowercase_ ( self ):
"""simple docstring"""
if self.config_class.is_composition:
return
lowerCAmelCase__ : Union[str, Any] = self.config_class()
self.parent.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Union[str, Any] = self.config_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
elif getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) != value:
wrong_values.append((key, getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), value) )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
lowerCAmelCase__ : Tuple = '''\n'''.join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def lowercase_ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
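# Typical usage of the helper class above in a model test suite (names follow
# transformers' ConfigTester, from which this class derives; the config class
# and kwarg are illustrative):
#
#   tester = ConfigTester(self, config_class=MyModelConfig, hidden_size=37)
#   tester.run_common_tests()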
| 233 | 1 |
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest proper prefix of input_string that is also a suffix."""
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
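# Worked example for the functions above (the second function's name is a
# reconstruction, since the original identifier was not recoverable):
# prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3], with the final 3
# because "aab" is both a proper prefix and a suffix of "aabaaab";
# longest_prefix("aabaaab") therefore returns 3.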
| 708 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCAmelCase ( A__: str , A__: str , A__: str , A__: PreTrainedTokenizer , A__: int , A__: Optional[int] = None , ):
'''simple docstring'''
UpperCAmelCase = {}
if train_file is not None:
UpperCAmelCase = [train_file]
if eval_file is not None:
UpperCAmelCase = [eval_file]
if test_file is not None:
UpperCAmelCase = [test_file]
UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=A__ )
UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
UpperCAmelCase = features_name.pop(A__ )
UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCAmelCase = {label: i for i, label in enumerate(A__ )}
UpperCAmelCase = tokenizer.model_input_names
UpperCAmelCase = {}
if len(A__ ) == 1:
for k in files.keys():
UpperCAmelCase = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding='''max_length''' ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
UpperCAmelCase = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='''max_length''' , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
UpperCAmelCase = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCAmelCase = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCAmelCase = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
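# Expected input format (illustrative): a headered CSV where one column holds
# the label and the rest are text features, selected via --label_column_id, e.g.
#
#   sentence,label
#   "a charming film",positive
#   "a tedious mess",negative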
__magic_name__ = logging.getLogger(__name__)
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(metadata={"""help""": """Which column contains the label"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """The path of the training file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """The path of the development file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """The path of the test file"""} )
__SCREAMING_SNAKE_CASE = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__: EvalPrediction ) -> Dict:
UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCAmelCase = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(A__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(A__ )
return results
if __name__ == "__main__":
main()
| 391 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
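

# Hedged reference sketch (not part of the test class above; names are illustrative):
# the "fixed_small" variance exercised by test_variance follows the standard DDPM
# posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) under the
# linear beta schedule configured in get_scheduler_config.
def _reference_fixed_small_variance(t: int, num_train_timesteps: int = 1000,
                                    beta_start: float = 0.0001, beta_end: float = 0.02) -> torch.Tensor:
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    variance = betas[t] * (1.0 - alpha_bar_prev) / (1.0 - alphas_cumprod[t])
    # clamp mirrors the guard the scheduler uses so log-variance stays finite at t == 0
    return torch.clamp(variance, min=1e-20)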
| 87 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
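

# Example (illustrative values, not a guaranteed format): http_user_agent({"framework": "pt"})
# returns something like
# "diffusers/<version>; python/3.10.12; session_id/<hex>; torch/<version>; framework/pt".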
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
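

# Example: for a cached file ".../snapshots/<40-char commit sha>/unet/config.json" this
# returns the "<40-char commit sha>" path segment; a segment that does not match
# REGEX_COMMIT_HASH yields None.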
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
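

# Example: _add_variant("diffusion_pytorch_model.bin", "fp16")
# -> "diffusion_pytorch_model.fp16.bin"  (the variant is slotted in before the extension)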
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"""this model name. Check the model page at """
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 341 | 0 |
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
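

# Example: the Atbash cipher is its own inverse, e.g. atbash("Hello") == "Svool" and
# atbash("Svool") == "Hello"; non-letter characters pass through unchanged.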
def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"{example} encrypted in atbash: {atbash(example)}")
benchmark()
| 717 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # The default common tokenizer tests assume that every ID is decodable on its own;
        # that does not hold for Perceiver because single bytes might not be valid utf-8.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
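
    # Hedged illustration of the IDs asserted above (offsets inferred from the test, not
    # from the tokenizer source): Perceiver tokenizes raw UTF-8 bytes shifted by the number
    # of special tokens, so ord("U") == 85 becomes 91 between [CLS] (4) and [SEP] (5).
    def _byte_encode_sketch(self, text, num_special_tokens=6, cls_id=4, sep_id=5):
        return [cls_id] + [b + num_special_tokens for b in text.encode("utf-8")] + [sep_id]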
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 499 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    General feature extraction class for speech-like sequence inputs.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad( self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists so this method can be
        # used as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation, )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad( self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None, ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate( self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None, ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
return padding_strategy | 494 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self, vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
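

# Example (hedged): WhisperConfig() reproduces the defaults above, and `attribute_map`
# lets generic code read config.hidden_size (-> d_model) and config.num_attention_heads
# (-> encoder_attention_heads) without knowing the Whisper-specific attribute names.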
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs( self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22_050, time_duration: float = 5.0, frequency: int = 220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 254 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
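

# Examples read off the tables above: pick_layers_to_copy(n_student=3, n_teacher=12)
# -> [0, 6, 11] (keep first/middle/last teacher layers), and
# get_layers_to_supervise(n_student=3, n_teacher=12) -> [3, 7, 11].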
def create_student_by_copying_alternating_layers( teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs, ) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 589 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__( self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim, ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
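

# Hedged shape sketch: with the defaults (4 extra tokens), image_embeddings of shape
# (batch, clip_embeddings_dim) and text_encoder_hidden_states of shape
# (batch, seq, cross_attention_dim) come back as (batch, 4 + seq, cross_attention_dim),
# alongside a (batch, time_embed_dim) additive time embedding.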
| 589 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
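# Illustrative sketch (not part of the original script): the 256-based slicing above
# assumes a hidden size of 256, so the packed in_proj matrix has shape (3 * 256, 256)
# with the query, key and value projections stacked row-wise. The hypothetical helper
# below performs the same split for an arbitrary hidden size.
def split_packed_in_proj(in_proj_weight, hidden_size=256):
    q_proj = in_proj_weight[:hidden_size, :]
    k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v_proj = in_proj_weight[-hidden_size:, :]
    return q_proj, k_proj, v_proj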
def prepare_img():
    # a cats-on-a-couch image from the COCO val set, used to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original Conditional DETR weights into the HuggingFace structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
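# Minimal sanity-check sketch for rename_key above (the key name is taken from the
# rename table earlier in this script; the demo function itself is not part of the
# original conversion script).
def _demo_rename_key():
    from collections import OrderedDict

    state_dict = OrderedDict({"transformer.decoder.norm.weight": 0})
    rename_key(state_dict, "transformer.decoder.norm.weight", "decoder.layernorm.weight")
    assert list(state_dict) == ["decoder.layernorm.weight"]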
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 538 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation with a variance-exploding score-based
    generative model, sampled with a predictor-corrector scheme.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
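# Usage sketch (hedged: the checkpoint id "google/ncsnpp-church" is an assumption and
# can be swapped for any unconditional score-SDE checkpoint; the demo function is not
# part of the pipeline module).
def _example_sde_ve():
    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church")
    image = pipe(batch_size=1, num_inference_steps=2000).images[0]
    image.save("sde_ve_sample.png")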
| 462 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """
    CLAP feature extractor: turns raw audio into (optionally fused) log-mel spectrogram
    features plus a per-example `is_longer` flag.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # "htk"-scale filters feed the fused ("fusion") path ...
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        # ... while "slaney"-scale filters feed the non-fused path.
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance to a Python dictionary, dropping the (large) filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the log-mel spectrogram of `waveform`, transposed to (frames, mel bins)."""
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate or pad `waveform` to `max_length` samples, then extract mel features."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
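    # Toy illustration of the "repeatpad" branch above (hypothetical helper, not part of
    # the original class): a short waveform is tiled floor(max_length / len(waveform))
    # times and then zero-padded on the right to exactly max_length samples.
    @staticmethod
    def _demo_repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
        n_repeat = int(max_length / len(waveform))
        tiled = np.tile(waveform, n_repeat)
        return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)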
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 350 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
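# Quick illustration of the normalization above (the demo function is not part of the metric):
def _demo_normalize_answer():
    assert normalize_answer("The  Cat!") == "cat"
    assert normalize_answer("an Apple.") == "apple"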
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
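# For comparison (hypothetical helper, not used by the metric): the unrolled
# 2-/3-/4-gram loops above are equivalent to a generic n-gram builder such as:
def _ngrams(tokens, n):
    return [" ".join(tokens[i : i + n]) for i in range(len(tokens) - n + 1)]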
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order)
    return output.score
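# Usage sketch for the wrapper above (toy data; the demo function is not part of the
# metric). For an exact match against one of the references the corpus BLEU score
# should be 100.0.
def _demo_compute_sacrebleu():
    predictions = ["the cat sat on the mat"]
    references = [["the cat sat on the mat", "a cat sat on a mat"]]
    return compute_sacrebleu(predictions=predictions, references=references)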
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 350 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 444 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """
    Use Pollard's rho algorithm to return a nontrivial factor of ``num``,
    or ``None`` if no factor is found within ``attempts`` tries.
    """
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        """Return the pseudorandom value ``(value**2 + step) % modulus``."""
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
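# Usage sketch: 8051 = 83 * 97 is the classic worked example for Pollard's rho; which
# of the two factors comes back depends on the pseudorandom walk, so only membership
# is asserted. With the default seed and step this is expected to succeed on the
# first attempt (the demo function is not part of the original module).
def _demo_pollard_rho():
    divisor = pollard_rho(8051)
    assert divisor in (83, 97)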
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 444 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Round each dimension up to a multiple of scale_factor**2, expressed on the latent grid."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
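# Worked example (demo only): with the default scale_factor=8 each dimension becomes
# ceil(dim / 64) * 8, so both 767 and 768 map to 12 * 8 = 96.
def _demo_downscale():
    assert downscale_height_and_width(767, 768) == (96, 96)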
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1]-scaled CHW torch tensor with a batch dim."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for the Kandinsky 2.2 decoder stage."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
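    # Worked example for get_timesteps (illustrative numbers only): with
    # num_inference_steps=100 and strength=0.3, init_timestep = min(int(100 * 0.3), 100) = 30
    # and t_start = 70, so only the last 30 scheduler timesteps are run -- a higher
    # `strength` therefore re-noises the input more and changes it more.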
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, significantly reducing GPU memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload models to CPU with hooks, keeping memory low at a small performance cost."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device the pipeline runs on, honoring accelerate's offload hooks."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 131 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` with `number_of_terms` terms, one per line."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 131 | 1 |
from math import sqrt
def is_prime(number: int) -> bool:
    """
    input: positive integer 'number'
    returns True if 'number' is prime, otherwise False.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of False and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status
def sieve_er(n: int) -> list:
    """
    input: positive integer 'n' > 2
    returns a list of prime numbers from 2 up to n (sieve of Eratosthenes).
    """
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes between 2 and n by trial division."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def prime_factorization(number: int) -> list:
    """simple docstring"""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans
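# Example (illustrative): prime_factorization(60) -> [2, 2, 3, 5]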
def greatest_prime_factor(number: int) -> int:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def is_even(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """simple docstring"""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"
    return number1
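# Examples (illustrative): gcd(12, 18) == 6, gcd(0, 7) == 7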
def kg_v(number1: int, number2: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"
    return ans
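# Example (illustrative): kg_v(8, 10) == 40  (least common multiple via prime factors)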
def get_prime(n: int) -> int:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and of type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """simple docstring"""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
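# Examples (illustrative): is_perfect_number(6) and is_perfect_number(28) are True
# because 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14.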
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """simple docstring"""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
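# Example (illustrative): simplify_fraction(10, 20) -> (1, 2)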
def factorial(n: int) -> int:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
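

if __name__ == "__main__":
    # Minimal smoke test, added for illustration; the original module ships without one.
    print(get_prime_numbers(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(goldbach(28))  # [5, 23]
    print([fib(i) for i in range(1, 8)])  # [1, 2, 3, 5, 8, 13, 21]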
| 619 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 619 | 1 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] = None) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
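# The function above is Kadane's algorithm: O(n) time, O(1) extra space.
# Example (illustrative): max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6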
| 701 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
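# Illustrative usage (follows the transformers conventions assumed above):
#   config = XLMRobertaXLConfig()  # facebook/xlm-roberta-xl defaults
#   onnx_config = XLMRobertaXLOnnxConfig(config, task="default")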
| 525 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
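    # Physics behind the formula: per mole of ideal gas, (1/2) * M * <v^2> = (3/2) * R * T,
    # hence v_rms = sqrt(<v^2>) = sqrt(3RT/M); with M in kg/mol the result is in m/s.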
| 568 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "embed_dim" ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "num_heads" ) )
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[16, 48, 96] , _SCREAMING_SNAKE_CASE=[1, 3, 6] , _SCREAMING_SNAKE_CASE=[1, 2, 10] , _SCREAMING_SNAKE_CASE=[7, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 2] , _SCREAMING_SNAKE_CASE=[False, False, True] , _SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-12 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=2 , ) -> Optional[Any]:
snake_case_ : List[str] = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Any = image_size
snake_case_ : Dict = patch_sizes
snake_case_ : Union[str, Any] = patch_stride
snake_case_ : List[str] = patch_padding
snake_case_ : str = is_training
snake_case_ : Tuple = use_labels
snake_case_ : Dict = num_labels
snake_case_ : Tuple = num_channels
snake_case_ : Tuple = embed_dim
snake_case_ : str = num_heads
snake_case_ : Any = stride_kv
snake_case_ : Optional[int] = depth
snake_case_ : Any = cls_token
snake_case_ : Tuple = attention_drop_rate
snake_case_ : Dict = initializer_range
snake_case_ : Any = layer_norm_eps
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : str = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> Any:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ : Optional[Any] = CvtModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = (self.image_size, self.image_size)
snake_case_ , snake_case_ : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case_ : Optional[int] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ : List[Any] = self.num_labels
snake_case_ : int = CvtForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
A : List[Any] = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
A : str = False
A : List[Any] = False
A : Union[str, Any] = False
A : Optional[int] = False
A : Optional[int] = False
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : Optional[int] = CvtModelTester(self )
snake_case_ : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowerCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self ) -> int:
return
@unittest.skip(reason="Cvt does not output attentions" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def _lowerCAmelCase ( self ) -> str:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def _lowerCAmelCase ( self ) -> Dict:
pass
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : str = [*signature.parameters.keys()]
snake_case_ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
snake_case_ : Union[str, Any] = outputs.hidden_states
snake_case_ : List[str] = len(self.model_tester.depth )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : int = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCAmelCase ( self ) -> Any:
pass
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = CvtModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( ):
snake_case_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> int:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Tuple = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = self.default_image_processor
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
snake_case_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
snake_case_ : int = torch.tensor([0.9285, 0.9015, -0.3150] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 568 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        '''simple docstring'''
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        '''simple docstring'''
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        '''simple docstring'''
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
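# Illustrative invocation (assumes the transformers-cli entry point registers this
# command; the flags mirror the parser defined in register_subcommand above):
#
#   transformers-cli run --task sentiment-analysis --input data.csv --format csv \
#       --column text --output predictions.csv --overwrite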
| 38 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
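# Illustrative launch (assuming this helper is saved as xla_spawn.py and the target
# script exposes the `_mp_fn(index)` entry point that xmp.spawn expects):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --arg1 value1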
| 38 | 1 |
import functools
def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
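# Example (illustrative): mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
# (a 1-day pass for day 1, a 7-day pass covering days 4-10, a 1-day pass for day 20).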
| 192 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase: Any = logging.get_logger(__name__)
# General docstring
_lowercase: List[Any] = '''RegNetConfig'''
# Base docstring
_lowercase: List[Any] = '''facebook/regnet-y-040'''
_lowercase: int = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_lowercase: Union[str, Any] = '''facebook/regnet-y-040'''
_lowercase: Tuple = '''tabby, tabby cat'''
_lowercase: str = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : int , lowercase__ : int , lowercase__ : int = 3 , lowercase__ : int = 1 , lowercase__ : int = 1 , lowercase__ : Optional[str] = "relu" , ):
super().__init__()
_lowerCAmelCase = nn.Convad(
lowercase__ , lowercase__ , kernel_size=lowercase__ , stride=lowercase__ , padding=kernel_size // 2 , groups=lowercase__ , bias=lowercase__ , )
_lowerCAmelCase = nn.BatchNormad(lowercase__ )
_lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : str , lowercase__ : Union[str, Any] ):
_lowerCAmelCase = self.convolution(lowercase__ )
_lowerCAmelCase = self.normalization(lowercase__ )
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : int , lowercase__ : RegNetConfig ):
super().__init__()
_lowerCAmelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : Optional[int] ):
_lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_lowerCAmelCase = self.embedder(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowercase__ : int , lowercase__ : int , lowercase__ : int = 2 ):
super().__init__()
_lowerCAmelCase = nn.Convad(lowercase__ , lowercase__ , kernel_size=1 , stride=lowercase__ , bias=lowercase__ )
_lowerCAmelCase = nn.BatchNormad(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Tensor ):
_lowerCAmelCase = self.convolution(lowercase__ )
_lowerCAmelCase = self.normalization(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowercase__ : int , lowercase__ : int ):
super().__init__()
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase = nn.Sequential(
nn.Convad(lowercase__ , lowercase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase__ , lowercase__ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[Any] ):
# b c h w -> b c 1 1
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = self.attention(lowercase__ )
_lowerCAmelCase = hidden_state * attention
return hidden_state
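# The layer above is a squeeze-and-excitation block: adaptive average pooling squeezes
# each channel to 1x1, two 1x1 convolutions (ReLU, then sigmoid) produce per-channel
# attention weights, and the input feature map is rescaled channel-wise.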
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 1 ):
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(lowercase__ , lowercase__ , stride=lowercase__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=lowercase__ ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Any ):
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(lowercase__ )
_lowerCAmelCase = self.shortcut(lowercase__ )
hidden_state += residual
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Any , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 1 ):
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(lowercase__ , lowercase__ , stride=lowercase__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act ) , RegNetSELayer(lowercase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=lowercase__ ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Tuple ):
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(lowercase__ )
_lowerCAmelCase = self.shortcut(lowercase__ )
hidden_state += residual
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 2 , lowercase__ : int = 2 , ):
super().__init__()
_lowerCAmelCase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase__ , lowercase__ , lowercase__ , stride=lowercase__ , ) , *[layer(lowercase__ , lowercase__ , lowercase__ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Any ):
_lowerCAmelCase = self.layers(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig ):
super().__init__()
_lowerCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase__ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase__ , lowercase__ , lowercase__ , depth=lowercase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : Tensor , lowercase__ : bool = False , lowercase__ : bool = True ):
_lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
_lowerCAmelCase = stage_module(lowercase__ )
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase__ , hidden_states=lowercase__ )
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ =RegNetConfig
UpperCamelCase__ ="regnet"
UpperCamelCase__ ="pixel_values"
UpperCamelCase__ =True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : List[Any] ):
if isinstance(lowercase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[str] , lowercase__ : List[Any]=False ):
if isinstance(lowercase__ , lowercase__ ):
_lowerCAmelCase = value
_lowercase: Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowercase: str = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : List[str] , lowercase__ : int ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = RegNetEmbeddings(lowercase__ )
_lowerCAmelCase = RegNetEncoder(lowercase__ )
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : Tensor , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None ):
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.embedder(lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase__ , pooler_output=lowercase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : str , lowercase__ : Union[str, Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = RegNetModel(lowercase__ )
# classification head
_lowerCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Optional[torch.FloatTensor] = None , lowercase__ : Optional[torch.LongTensor] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None , ):
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.regnet(lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ )
_lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase = 'single_label_classification'
else:
_lowerCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase = loss_fct(lowercase__ , lowercase__ )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase = BCEWithLogitsLoss()
_lowerCAmelCase = loss_fct(lowercase__ , lowercase__ )
if not return_dict:
_lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase__ , logits=lowercase__ , hidden_states=outputs.hidden_states )
| 192 | 1 |
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
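# Note: `locals()` inside each function turns its keyword arguments directly into the
# HTTP query string, so the parameter names (q, appid, lat, lon) must match the
# OpenWeatherMap query keys exactly.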
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 297 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
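# Illustrative usage: X-MOD keeps one adapter per language, so its config carries the
# language list, e.g. XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX").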
| 297 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(lowercase_ , unittest.TestCase ):
"""simple docstring"""
lowercase_ = OpenAIGPTTokenizer
lowercase_ = OpenAIGPTTokenizerFast
lowercase_ = True
lowercase_ = False
def snake_case ( self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowercase__ : Dict = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowercase__ : Union[str, Any] = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(_lowercase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(_lowercase ) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
return "lower newer", "lower newer"
def snake_case ( self : List[Any] ):
lowercase__ : str = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase__ : Optional[int] = "lower"
lowercase__ : Union[str, Any] = ["low", "er</w>"]
lowercase__ : Optional[int] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
lowercase__ : int = tokens + ["<unk>"]
lowercase__ : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
# Simple input
lowercase__ : str = "This is a simple input"
lowercase__ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = ("This is a simple input", "This is a pair")
lowercase__ : List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding="max_length" )
# Simple input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding="max_length" )
# Simple input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding="max_length" , )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding="max_length" )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding="max_length" )
# Pair input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding="max_length" , )
def snake_case ( self : Optional[int] ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class snake_case__(lowercase_ ):
"""simple docstring"""
pass
| 496 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["""input_ids""", """attention_mask"""]
_a = BartTokenizer
def __init__( self, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase="replace", _lowercase="<s>", _lowercase="</s>", _lowercase="</s>", _lowercase="<s>", _lowercase="<unk>", _lowercase="<pad>", _lowercase="<mask>", _lowercase=False, _lowercase=True, **_lowercase, ) -> Dict:
super().__init__(
_lowercase, _lowercase, tokenizer_file=_lowercase, errors=_lowercase, bos_token=_lowercase, eos_token=_lowercase, sep_token=_lowercase, cls_token=_lowercase, unk_token=_lowercase, pad_token=_lowercase, mask_token=_lowercase, add_prefix_space=_lowercase, trim_offsets=_lowercase, **_lowercase, )
SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', _lowercase ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ = getattr(_lowercase, pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = pre_tok_class(**_lowercase )
SCREAMING_SNAKE_CASE_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE_ = 'post_processor'
SCREAMING_SNAKE_CASE_ = getattr(self.backend_tokenizer, _lowercase, _lowercase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE_ = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE_ = tuple(state['cls'] )
SCREAMING_SNAKE_CASE_ = False
if state.get('add_prefix_space', _lowercase ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = True
if state.get('trim_offsets', _lowercase ) != trim_offsets:
SCREAMING_SNAKE_CASE_ = trim_offsets
SCREAMING_SNAKE_CASE_ = True
if changes_to_apply:
SCREAMING_SNAKE_CASE_ = getattr(_lowercase, state.pop('type' ) )
SCREAMING_SNAKE_CASE_ = component_class(**_lowercase )
setattr(self.backend_tokenizer, _lowercase, _lowercase )
@property
def a__ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a__ ( self, _lowercase ) -> Dict:
SCREAMING_SNAKE_CASE_ = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else value
SCREAMING_SNAKE_CASE_ = value
def a__ ( self, *_lowercase, **_lowercase ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = kwargs.get('is_split_into_words', _lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_lowercase, **_lowercase )
def a__ ( self, *_lowercase, **_lowercase ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = kwargs.get('is_split_into_words', _lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_lowercase, **_lowercase )
def a__ ( self, _lowercase, _lowercase = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(_lowercase, name=_lowercase )
return tuple(_lowercase )
def a__ ( self, _lowercase, _lowercase=None ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
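    # The layout produced above: a single sequence becomes `<s> A </s>`; a pair of
    # sequences becomes `<s> A </s></s> B </s>` (the BART/RoBERTa-style double separator).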
def a__ ( self, _lowercase, _lowercase = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 294 | 0 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # b85encode operates on bytes, so encode the input string first
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    # b85decode returns bytes; decode them back to a UTF-8 string
    return base64.b85decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
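# Illustrative round trip (added; the encoded value is what Python's
# base64.b85encode yields for this input):
#     >>> base85_encode("Hello World!")
#     b'NM&qnZy;B1a%^NF'
#     >>> base85_decode(b'NM&qnZy;B1a%^NF')
#     'Hello World!'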
| 714 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 257 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # start from pure noise scaled by the scheduler's initial sigma
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
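# Illustrative usage (added sketch; assumes a score-SDE checkpoint is available --
# "google/ncsnpp-church-256" is one such checkpoint on the diffusers hub):
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")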
| 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , initial_learning_rate : float , decay_schedule_fn : Callable , warmup_steps : int , power : float = 1.0 , name : str = None , ):
        """simple docstring"""
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        """simple docstring"""
        with tf.name_scope(self.name or "WarmUp" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
        """simple docstring"""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
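# Illustrative usage (added sketch; values are arbitrary examples):
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
#     )
#     model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")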
class AdamWeightDecay( Adam ):
    def __init__( self , learning_rate : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , beta_1 : float = 0.9 , beta_2 : float = 0.999 , epsilon : float = 1e-7 , amsgrad : bool = False , weight_decay_rate : float = 0.0 , include_in_weight_decay : Optional[List[str]] = None , exclude_from_weight_decay : Optional[List[str]] = None , name : str = "AdamWeightDecay" , **kwargs , ):
        """simple docstring"""
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config( cls , config ):
        """simple docstring"""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local( self , var_device , var_dtype , apply_state ):
        """simple docstring"""
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate , name="adam_weight_decay_rate" )
    def _decay_weights_op( self , var , learning_rate , apply_state ):
        """simple docstring"""
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        """simple docstring"""
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ):
        """simple docstring"""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ):
        """simple docstring"""
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        """simple docstring"""
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ):
        """simple docstring"""
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate} )
        return config
    def _do_use_weight_decay( self , param_name ):
        """simple docstring"""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__( self ):
        """simple docstring"""
        self._gradients = []
        self._accum_steps = None
    @property
    def step( self ):
        """simple docstring"""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients( self ):
        """simple docstring"""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ):
        """simple docstring"""
        if not self._gradients:
            _ = self.step # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(gradients )}""" )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ):
        """simple docstring"""
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
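# Illustrative usage (added sketch): accumulate gradients over several micro-batches
# before applying them once inside a custom training loop. `micro_batches` and
# `compute_gradients` are hypothetical names for this example:
#     accumulator = GradientAccumulator()
#     for micro_batch in micro_batches:
#         grads = compute_gradients(micro_batch)
#         accumulator(grads)
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()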
| 475 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int):
    '''simple docstring'''
    # CJK Unicode block ranges (same check as in BERT's original tokenizer)
    if (
        (cp >= 0X4e_00 and cp <= 0X9f_ff)
        or (cp >= 0X34_00 and cp <= 0X4d_bf) #
        or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
        or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
        or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
        or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
        or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
        or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
    ): #
        return True
    return False
def is_chinese(word: str):
    '''simple docstring'''
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # mark the continuation characters of a matched word with '##'
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''simple docstring'''
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp) # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
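# Example invocation (added; the script filename is an assumption, the paths are
# simply the argparse defaults defined below):
#     python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt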
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args) | 97 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    '''simple docstring'''
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide: str):
    '''simple docstring'''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide: str, overwrite: bool = False):
    '''simple docstring'''
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ' to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 97 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file) -> None:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F'''Building PyTorch model from configuration: {config}''')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 57 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 419 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 314 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number ):
    """simple docstring"""
    # a number is a perfect square iff int(sqrt(n))**2 == n
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num , y_den , y_num , x_den , z_num , z_den ):
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution( order = 35 ):
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
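# Quick illustrative run with a smaller search order (added; the printed value is
# not asserted here -- it only shows how to call `solution` cheaply):
#     print(solution(3))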
| 314 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '[PAD]' )
        self.assertEqual(len(vocab_keys ) , 3_0_0_0_1 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
    def test_do_lower_case( self ):
        '''simple docstring'''
        # fmt: off
        sequence = ' \tHeLLo!how  \n Are yoU?  '
        tokens_target = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def test_sentencepiece_tokenize_and_convert_tokens_to_string( self ):
        '''simple docstring'''
        pass
    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def test_sentencepiece_tokenize_and_decode( self ):
        '''simple docstring'''
        pass
    def test_split_by_punct( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def test_do_lower_case_split_by_punct( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def test_do_lower_case_split_by_punct_false( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def test_do_lower_case_false_split_by_punct( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def test_do_lower_case_false_split_by_punct_false( self ):
        '''simple docstring'''
        # fmt: off
        sequence = ' \tHeLLo!how  \n Are yoU?  '
        tokens_target = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        sequence = 'This is a test'
        ids_target = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
        tokens_target = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        back_tokens_target = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , keep_accents=True )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        ids_target = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
        tokens_target = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        back_tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode('sequence builders' )
        text_a = tokenizer.encode('multi-sequence build' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
    def test_tokenizer_integration( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 93 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
__A = """"""
__A = """"""
__A = """"""
__A = """"""
def __A (_SCREAMING_SNAKE_CASE ) ->None:
"""simple docstring"""
lowerCAmelCase__ :Any = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = tweepy.API(_SCREAMING_SNAKE_CASE )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase__ :Union[str, Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase__ :Optional[Any] = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=200 )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# save the id of the oldest tweet less one
lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_SCREAMING_SNAKE_CASE ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase__ :Union[str, Any] = api.user_timeline(
screen_name=_SCREAMING_SNAKE_CASE , count=200 , max_id=_SCREAMING_SNAKE_CASE )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# update the id of the oldest tweet less one
lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
print(F"...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase__ :Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
lowerCAmelCase__ :List[str] = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 93 | 1 |
'''simple docstring'''
def miller_rabin( n : int , allow_probable : bool = False ):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
return True
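# Illustrative checks (added; consistent with the assertions in the test below):
#     assert miller_rabin(97) is True      # 97 is prime
#     assert miller_rabin(561) is False    # 561 = 3 * 11 * 17 (a Carmichael number)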
def test_miller_rabin():
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0] | 11 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    # reshape a 1-D row vector into a column vector
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
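Assuming the module above is importable, a quick usage sketch on synthetic two-class data (the values are illustrative only, not from the tests above):

import numpy as np

rng = np.random.default_rng(0)
class_a = rng.normal(loc=0.0, size=(3, 20))  # 3 features, 20 samples
class_b = rng.normal(loc=3.0, size=(3, 20))
features = np.hstack([class_a, class_b])
labels = np.array([0] * 20 + [1] * 20)

print(principal_component_analysis(features, dimensions=2).shape)   # (2, 40)
print(linear_discriminant_analysis(features, labels, 2, 1).shape)   # (1, 40)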
| 500 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment,
        or -1 if the pattern matches completely."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the start positions of all matches
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
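For a second, longer illustration (the output shown assumes the implementation above):

bms = BoyerMooreSearch("THIS IS A TEST TEXT", "TEST")
print(bms.bad_character_heuristic())  # [10] -- "TEST" starts at index 10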
| 500 | 1 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Check primality of a positive integer in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
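A quick sketch of using the helper above to list small primes:

print([n for n in range(2, 50) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]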
| 68 |
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
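The sort performs O(n^2) comparisons regardless of input order; a brief sketch:

print(exchange_sort([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]
print(exchange_sort([-1, 9, 0, 3]))    # [-1, 0, 3, 9]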
| 68 | 1 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
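A brief usage sketch of the converter:

print(energy_conversion("joule", "kilojoule", 1000))  # 1.0
print(energy_conversion("kilowatthour", "joule", 1))  # 3600000.0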
| 221 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of the
    grid, moving only right and down (Project Euler 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
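The same recurrence works on an inline grid; a hedged sketch with an illustrative helper name (min_path_sum is not part of the file above):

def min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid)
    dp = [row[:] for row in grid]
    for i in range(n):
        for j in range(n):
            if i == 0 and j == 0:
                continue
            dp[i][j] += min(
                dp[i - 1][j] if i > 0 else float("inf"),
                dp[i][j - 1] if j > 0 else float("inf"),
            )
    return dp[-1][-1]

print(min_path_sum([[131, 673], [201, 96]]))  # 428 = 131 + 201 + 96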
| 486 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ : Any = [], []
snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
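Note that predictions stay in the scaler's [0, 1] range. A hedged sketch of mapping them back to the original units -- this assumes the script is refactored so the fitted scaler is kept in a variable instead of being created inline as above:

scaler = MinMaxScaler()
actual_data = scaler.fit_transform(actual_data)
# ... build and fit the model as above ...
pred_in_original_units = scaler.inverse_transform(pred.reshape(-1, 1))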
| 719 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
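Outside of the test harness, a minimal inference sketch for the same checkpoint (the zero bbox tensor is a toy placeholder -- real boxes would come from an OCR step, normalized to the 0-1000 range):

import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
model = AutoModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

encoding = tokenizer("Hello world", return_tensors="pt")
bbox = torch.zeros((1, encoding["input_ids"].shape[1], 4), dtype=torch.long)  # one box per token
outputs = model(**encoding, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])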
| 644 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
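A minimal usage sketch for the tokenizer above (the file names are placeholders -- a real checkpoint such as facebook/s2t-small-librispeech-asr ships both a vocab.json and a sentencepiece.bpe.model):

tokenizer = Speech2TextTokenizer(vocab_file="vocab.json", spm_file="sentencepiece.bpe.model")
ids = tokenizer.encode("hello world")
print(tokenizer.decode(ids))
| 448 |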
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXT V2 model."""

    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
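A short instantiation sketch (the defaults shown reproduce the tiny variant's stage widths):

config = ConvNextV2Config(out_features=["stage4"])
print(config.hidden_sizes)  # [96, 192, 384, 768]
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']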
| 102 | 0 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2^7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
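The three-argument pow is what makes this run instantly: it performs modular exponentiation (square-and-multiply) instead of materializing the roughly 2.36-million-digit value of 2**7830457 first:

print(pow(2, 7830457, 10**10))  # last ten digits, computed without the full power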
| 43 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 43 | 1 |