| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how often they appear)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # Cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # Decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
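
# Usage sketch (illustrative, not part of the original sample): "khoor zruog" is
# "hello world" Caesar-shifted by 3. On inputs this short the chi-squared
# heuristic is not guaranteed to recover the true shift, so treat the output as
# a demonstration only.
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
    print(f"shift={shift} chi^2={chi_squared:.2f} decoded={decoded!r}")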
| 403 |

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the backend normalizer's saved state disagrees with the requested
        # options, rebuild it with the values passed to this tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
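
# Usage sketch (illustrative; this module uses relative imports, so in practice
# the class is imported from the transformers package, and the tokenizer files
# are downloaded from the Hub):
#
#     from transformers import SqueezeBertTokenizerFast
#     tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     a = tok.encode("first segment", add_special_tokens=False)
#     b = tok.encode("second segment", add_special_tokens=False)
#     # segment A (with [CLS]/[SEP]) gets token_type_id 0, segment B gets 1
#     print(tok.create_token_type_ids_from_sequences(a, b))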
| 403 | 1 |

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
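
# Illustration (a minimal standalone sketch, not transformers code): the
# _LazyModule pattern above defers importing each submodule until one of its
# names is first accessed. The same idea using only the standard library:
import importlib
import types


class LazyNamespace(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # The real import happens here, on first attribute access.
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)


# e.g. LazyNamespace("demo", {"json": ["loads"]}).loads('{"a": 1}') == {"a": 1}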
| 248 |

import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
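
# Sanity-check sketch (illustrative): the generator yields primes in order, and
# the 6k +/- 1 trial division agrees with known values (91 = 7 * 13 is composite).
if __name__ == "__main__":
    from itertools import islice

    assert list(islice(prime_generator(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert is_prime(97) and not is_prime(91)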
| 248 | 1 |

from typing import List, Optional, Union

import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .text_encoder import MultilingualCLIP


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""


def get_new_h_w(h, w, scale_factor=8):
    """Round h and w up to the nearest sizes compatible with the movq scale factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
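
# Worked example (illustrative): with the default scale_factor=8, 8**2 = 64 must
# divide each side exactly. 512 // 64 = 8 with no remainder, so 512 maps to
# 8 * 8 = 64; 500 is not a multiple of 64 and is rounded up to the same result:
#
#     >>> get_new_h_w(512, 512)
#     (64, 64)
#     >>> get_new_h_w(500, 500)
#     (64, 64)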
class KandinskyPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with Kandinsky."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: push the prediction away from the
                # unconditional branch by guidance_scale
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 688 |

import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
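
# Example invocation (illustrative; the script filename and paths are
# assumptions, not taken from the original sample):
#
#     python convert_unet_config_and_weights.py \
#         --repo_path ./old-unet-repo --dump_path ./converted-unet
#
# The script renames legacy config keys (left column of
# config_parameters_to_change) and legacy checkpoint prefixes
# (key_parameters_to_change) to the current diffusers naming scheme.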
| 688 | 1 |

import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 720 |

import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
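
# Example (illustrative): each Newton step x <- x - (x**2 - a) / (2 * x) roughly
# doubles the number of correct digits. For a = 2 the initial point is 4.0 (the
# first repeated squaring of 2.0 that exceeds a), and the iterates run
# 4.0 -> 2.25 -> 1.5694... -> 1.4218... -> 1.41421...
if __name__ == "__main__":
    assert abs(square_root_iterative(2) - math.sqrt(2)) < 1e-9
    assert abs(square_root_iterative(625) - 25.0) < 1e-9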
| 445 | 0 |

from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
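
# Self-contained check (illustrative; no image file needed): invert a synthetic
# 2x2 BGR image. NumPy broadcasting handles the [255, 255, 255] - pixel
# subtraction element-wise.
if __name__ == "__main__":
    import numpy as np

    demo = np.array([[[0, 0, 0], [255, 255, 255]], [[10, 20, 30], [100, 150, 200]]], dtype=np.uint8)
    neg_demo = convert_to_negative(demo.copy())
    assert (neg_demo[0][0] == [255, 255, 255]).all()
    assert (neg_demo[1][0] == [245, 235, 225]).all()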
| 92 |

from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score a candidate by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random cut point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
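
# Small illustration (added sketch): evaluate() scores per-position matches, so
# a candidate sharing its first three characters with the target scores 3.0 and
# a fully disjoint one scores 0.0.
if __name__ == "__main__":
    assert evaluate("abcde", "abcxy") == ("abcde", 3.0)
    assert evaluate("zzzzz", "abcde") == ("zzzzz", 0.0)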
| 92 | 1 |

import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
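
# Usage sketch (illustrative; this module uses relative test imports, so the
# equivalent from user code is shown, and "my_train.txt" is a hypothetical
# local file). The public entry point exercising the same reader is:
#
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "my_train.txt"})["train"]
#     ds.column_names  # ["text"] -- one row per line of the file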
| 553 |

import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accelerator(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
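
# Usage sketch (illustrative; this test module uses relative imports, so run the
# equivalent from user code; the tiny model below is downloaded from the Hub):
#
#     from transformers import pipeline
#     clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     clf("This is great !")               # [{"label": "LABEL_0", "score": ...}]
#     clf("This is great !", top_k=None)   # one dict per label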
| 553 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
            ]
        )
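# Usage sketch (a minimal example, not part of the original module): the same
# class ships in `transformers`, so the equivalent public-API call looks like
# the following. `attribute_map` lets the generic `hidden_size` name alias
# DistilBERT's `dim` parameter.
#
#   from transformers import DistilBertConfig
#
#   config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=4 * 512)
#   assert config.hidden_size == 512  # resolved through attribute_map
#   print(config.to_dict()["n_layers"])  # 4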
def remove_digit(num: int) -> int:
    """Returns the biggest possible result achievable by removing one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # one copy of the digit list per removable position
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
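# Quick sanity checks for the restored function: dropping the "1" from 152
# leaves the largest two-digit remainder, and the sign is ignored via abs().
if __name__ == "__main__":
    assert remove_digit(152) == 52
    assert remove_digit(-290) == 90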
"""simple docstring"""
from collections.abc import Generator
from math import sin
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
if len(_UpperCAmelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
A_ : int = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to hex, little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string and pads it for block processing."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Splits the bit string into 512-char blocks, yielding each as 16 words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Flips every bit of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two integers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char hex MD5 digest of the given message, as bytes."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
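# Cross-check of the implementation above against the standard library; this
# block is an added verification sketch, not part of the original module.
if __name__ == "__main__":
    import hashlib

    sample = b"hello"
    assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")
    print(md5_me(sample))  # b'5d41402abc4b2a76b9719d911017c592'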
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page replacement policy that discards the least recently used page first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition is perfect when sqrt(4*n + 1) / 2 + 1 / 2 is an exact power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Returns the partition value at which the proportion of perfect partitions
    first drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    # The DPTConfig attribute names below follow the upstream conversion script.
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
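# Added usage note (file name and checkpoint path are placeholders, not from the
# original script): --checkpoint_url is passed straight to torch.load above, so
# despite its name it must point at a local checkpoint file.
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --show_prediction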
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, ) ->tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
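# Added usage sketch: passing 0 for the unknown quantity asks the function to
# solve for it; here n_i = sqrt(25 * 100) = 50 by the mass-action law.
if __name__ == "__main__":
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
    # -> ('intrinsic_conc', 50.0)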
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch)

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch)

    return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
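# Usage sketch (a minimal example, not part of the original module): the same
# helper is exposed publicly as `diffusers.optimization.get_scheduler`, so with
# plain PyTorch it can be driven like this, stepping once per training batch:
#
#   import torch
#   from diffusers.optimization import get_scheduler
#
#   params = [torch.nn.Parameter(torch.zeros(1))]
#   optimizer = torch.optim.AdamW(params, lr=1e-3)
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
#   optimizer.step()
#   scheduler.step()
#   print(scheduler.get_last_lr())  # lr after the first warmup step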
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
def sum_of_divisors(input_num: int) -> int:
    """Returns the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
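# Added quick check: 28 is a perfect number, so its proper divisors
# (1, 2, 4, 7, 14) sum back to 28.
if __name__ == "__main__":
    assert sum_of_divisors(28) == 28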
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
"""simple docstring"""
def lowerCAmelCase ( UpperCamelCase_: str ) -> list[int]:
'''simple docstring'''
_a = [0 for i in range(len(UpperCamelCase_ ) )]
# initialize interval's left pointer and right pointer
_a , _a = 0, 0
for i in range(1 , len(UpperCamelCase_ ) ):
# case when current index is inside the interval
if i <= right_pointer:
_a = min(right_pointer - i + 1 , z_result[i - left_pointer] )
_a = min_edge
while go_next(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
return z_result
def go_next(i: int, z_result: list, s: str) -> bool:
    """Checks whether the Z-box at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    """Counts the occurrences of pattern in input_str using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
        if val >= len(pattern):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
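# Added usage sketch: "abr" occurs twice in "abracadabra"; z_function itself
# exposes the prefix-match lengths that find_pattern counts.
if __name__ == "__main__":
    assert find_pattern("abr", "abracadabra") == 2
    print(z_function("aaaa"))  # [0, 3, 2, 1]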
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 3_7) , batch.input_ids.shape )
        self.assertEqual((2, 3_7) , batch.attention_mask.shape )
    def test_empty_target_text( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertNotIn("decoder_input_ids" , batch )
        self.assertNotIn("decoder_attention_mask" , batch )
    def test_max_length_integration( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=3_2 , padding="max_length" , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )
    def test_eos_in_input( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
        expected_tgt_tokens = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
        # fmt: on
        batch = tokenizer(src_text , text_target=tgt_text )
        self.assertEqual(expected_src_tokens , batch["input_ids"][0] )
        self.assertEqual(expected_tgt_tokens , batch["labels"][0] )
    def test_save_and_load_tokenizer( self ):
        '''simple docstring'''
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=False )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def test_decode_single_bytes( self ):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                tokenizer = tokenizer_class.from_pretrained(tmp_dir )
                self.assertTrue(tokenizer.decode([2_5_5] ) == "" )
def lowerCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
pass
def lowerCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
pass
    def test_convert_tokens_to_string_format( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
    def test_tokenizers_common_ids_setters( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters , skip_special_tokens=False )
                for attr in attributes_list:
                    setattr(tokenizer , attr + "_id" , None )
                    self.assertEqual(getattr(tokenizer , attr ) , None )
                    self.assertEqual(getattr(tokenizer , attr + "_id" ) , None )
                    setattr(tokenizer , attr + "_id" , token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr ) , token_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr + "_id" ) , token_id_to_test_setters )
                setattr(tokenizer , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens_ids" ) , [] )
                setattr(tokenizer , "additional_special_tokens_ids" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
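# --- Illustrative addition (not part of the original test file) ---
# ByT5 tokenizes raw UTF-8 bytes: judging from the expected ids in the
# integration tests above, an id is simply byte_value + 3 (ids 0-2 appear to
# be reserved specials), with eos id 1 appended. The offset below is inferred
# from those fixtures rather than taken from the tokenizer source.
def byte_level_ids(text: str, offset: int = 3, eos_id: int = 1) -> list:
    return [byte + offset for byte in text.encode("utf-8")] + [eos_id]

assert byte_level_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]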
| 612 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset( Dataset ):
        features: List[InputFeatures]
        def __init__( self , data_dir: str , tokenizer: PreTrainedTokenizer , task: str , max_seq_length: Optional[int] = None , overwrite_cache=False , evaluate: bool = False , ):
            """simple docstring"""
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''' )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info("Training examples: %s" , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info("Saving features into cached file %s" , cached_features_file )
                    torch.save(self.features , cached_features_file )
def __len__( self : Any ):
"""simple docstring"""
return len(self.features )
        def __getitem__( self , i ):
            """simple docstring"""
            return self.features[i]
        def get_labels( self ):
            """simple docstring"""
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
        def __init__( self , data_dir: str , tokenizer: PreTrainedTokenizer , task: str , max_seq_length: Optional[int] = 128 , overwrite_cache=False , evaluate: bool = False , ):
            """simple docstring"""
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10_000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
        def get_dataset( self ):
            """simple docstring"""
            return self.dataset
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.features )
        def __getitem__( self , i ):
            """simple docstring"""
            return self.features[i]
        def get_labels( self ):
            """simple docstring"""
            return self.label_list
class HansProcessor( DataProcessor ):
    def get_train_examples( self , data_dir ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F'''guid: {example}''' )
        logger.info(F'''features: {features[i]}''' )
    return features
hans_tasks_num_labels = {
'''hans''': 3,
}
hans_processors = {
'''hans''': HansProcessor,
}
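# --- Illustrative addition (not part of the original module) ---
# HansDataset.__init__ above uses a lock-then-cache pattern so that in
# distributed training only the first process builds the features while the
# others load them. The standalone sketch below shows the same pattern with a
# plain text payload instead of tensors; `FileLock` and `os` are the imports
# already used at the top of this file.
def cached_compute(cache_path, compute_fn):
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        if os.path.exists(cache_path):
            # a later process: the cache already exists, just read it
            with open(cache_path, encoding="utf-8") as f:
                return f.read()
        # the first process: compute once and persist for everyone else
        result = compute_fn()
        with open(cache_path, "w", encoding="utf-8") as f:
            f.write(result)
        return result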
| 69 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechTaProcessor( ProcessorMixin ):
    feature_extractor_class = """SpeechT5FeatureExtractor"""
    tokenizer_class = """SpeechT5Tokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        audio = kwargs.pop("audio" , None )
        text = kwargs.pop("text" , None )
        text_target = kwargs.pop("text_target" , None )
        audio_target = kwargs.pop("audio_target" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs ):
        """simple docstring"""
        input_values = kwargs.pop("input_values" , None )
        input_ids = kwargs.pop("input_ids" , None )
        labels = kwargs.pop("labels" , None )
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["input_ids"]
            else:
                # temporarily pad with the mel-bin count instead of the feature size
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
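# --- Illustrative usage sketch (not part of the original module) ---
# The processor routes `text`/`audio` to the source side and
# `text_target`/`audio_target` to the label side. The checkpoint name below
# is an assumption and the calls download weights, so this is left commented
# out as a sketch rather than executable module-level code.
# processor = SpeechTaProcessor.from_pretrained("microsoft/speecht5_tts")  # hypothetical checkpoint
# inputs = processor(text="Hello world", return_tensors="pt")
# labels = processor(text_target="Hello world", return_tensors="pt")["input_ids"]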
| 69 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class __a ( unittest.TestCase ):
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=True , )
        assert hasattr(self , 'env' )
    def create_estimator( self , instance_count ):
        job_name = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
    @parameterized.expand([(2,)] )
    def test_script( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
        with open(F'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
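# --- Illustrative addition (not part of the original test class) ---
# The KPI extraction in test_script can be sanity-checked offline with a
# hand-built dataframe, since TrainingJobAnalytics.dataframe() returns a
# pandas DataFrame. pandas is assumed to be available in this environment.
def _demo_metric_extraction():
    import pandas as pd
    df = pd.DataFrame({'metric_name': ['eval_accuracy', 'eval_accuracy', 'eval_loss'], 'value': [0.71, 0.74, 0.55]})
    eval_accuracy = list(df[df.metric_name == 'eval_accuracy']['value'])
    eval_loss = list(df[df.metric_name == 'eval_loss']['value'])
    assert all(acc >= 0.7 for acc in eval_accuracy) and all(loss <= 0.6 for loss in eval_loss)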
| 222 |
"""simple docstring"""
g = 9.80665
def buoyant_force(fluid_density: float , volume: float , gravity: float = g )-> float:
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density' )
    if volume < 0:
        raise ValueError('Impossible Object volume' )
    if gravity <= 0:
        raise ValueError('Impossible Gravity' )
    return fluid_density * gravity * volume
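# Worked example (an illustrative addition): a fully submerged 0.5 m^3 object
# in fresh water (~1000 kg/m^3) displaces 1000 * 9.80665 * 0.5 = 4903.325 N.
assert abs(buoyant_force(1000, 0.5) - 4903.325) < 1e-9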
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 222 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    """simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_multiple_size=4 ,hidden_act="gelu" ,hidden_dropout=0.0 ,attention_dropout=0.1 ,weight_tying=True ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def prepare_config_and_inputs_for_decoder( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self ,config ,input_ids ,input_mask ):
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,config ,input_ids ,input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self ,config ,input_ids ,input_mask ,token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self ,config ,input_ids ,input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids ,attention_mask=input_mask ,use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] ,dim=-1 )
        output_from_no_past = model(next_input_ids ,attention_mask=next_attention_mask ,output_hidden_states=True )
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens ,attention_mask=next_attention_mask ,past_key_values=past_key_values ,output_hidden_states=True ,)["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice ,output_from_past_slice ,atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=GPTNeoXJapaneseConfig ,hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config ,input_ids ,input_mask )
    def test_model_as_decoder( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config ,input_ids ,input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config ,input_ids ,input_mask )
    def test_decoder_model_past_large_inputs( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config ,input_ids ,input_mask )
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_generation( self ):
        model_id = """abeja/gpt-neox-japanese-2.7b"""
        prompts = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
        EXPECTED_OUTPUTS = [
            """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
            """100年後に必要とされる会社は、「人」が中心の会社です。""",
            """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
            """国境の長いトンネルを抜けると、そこは雪国だった。""",
            """美味しい日本食といえば、やっぱりお寿司ですよね。""",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt ,return_tensors='''pt''' ).input_ids
            generated_ids = model.generate(input_ids ,max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids ,skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs ,EXPECTED_OUTPUTS )
| 30 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        '''simple docstring'''
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["""input_ids"""] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["""labels"""] = torch.tensor(labels , dtype=torch.int64 )
        return batch
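# --- Illustrative addition (not part of the original script) ---
# The collator above flattens (batch, num_choices) examples into one list so
# a single tokenizer.pad call can pad everything, then reshapes back. The toy
# sketch below mimics that trick with plain lists and a stand-in padder.
def _demo_flatten_pad_unflatten():
    features = [
        {"input_ids": [[1], [1, 2], [1, 2, 3], [1]]},
        {"input_ids": [[4, 5], [4], [4, 5, 6], [4]]},
    ]
    batch_size = len(features)
    num_choices = len(features[0]["input_ids"])
    flattened = [choice for feature in features for choice in feature["input_ids"]]
    max_len = max(len(choice) for choice in flattened)
    padded = [choice + [0] * (max_len - len(choice)) for choice in flattened]  # stand-in for tokenizer.pad
    unflattened = [padded[i : i + num_choices] for i in range(0, len(padded), num_choices)]
    assert len(unflattened) == batch_size and all(len(row) == num_choices for row in unflattened)
    return unflattened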
def lowerCamelCase ( ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCAmelCase : Any = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
datasets.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__UpperCAmelCase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__UpperCAmelCase : str = {}
if data_args.train_file is not None:
__UpperCAmelCase : str = data_args.train_file
if data_args.validation_file is not None:
__UpperCAmelCase : Union[str, Any] = data_args.validation_file
__UpperCAmelCase : List[Any] = data_args.train_file.split(""".""" )[-1]
__UpperCAmelCase : Optional[int] = load_dataset(
_UpperCamelCase , data_files=_UpperCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__UpperCAmelCase : Any = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__UpperCAmelCase : Dict = [f'''ending{i}''' for i in range(4 )]
__UpperCAmelCase : Any = """sent1"""
__UpperCAmelCase : List[str] = """sent2"""
if data_args.max_seq_length is None:
__UpperCAmelCase : List[str] = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__UpperCAmelCase : str = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
__UpperCAmelCase : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_UpperCamelCase : str ):
__UpperCAmelCase : List[str] = [[context] * 4 for context in examples[context_name]]
__UpperCAmelCase : Union[str, Any] = examples[question_header_name]
__UpperCAmelCase : int = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(_UpperCamelCase )
]
# Flatten out
__UpperCAmelCase : List[str] = list(chain(*_UpperCamelCase ) )
__UpperCAmelCase : List[Any] = list(chain(*_UpperCamelCase ) )
# Tokenize
__UpperCAmelCase : Optional[int] = tokenizer(
_UpperCamelCase , _UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_UpperCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__UpperCAmelCase : List[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__UpperCAmelCase : Optional[int] = min(len(_UpperCamelCase ) , data_args.max_train_samples )
__UpperCAmelCase : Union[str, Any] = train_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__UpperCAmelCase : List[str] = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__UpperCAmelCase : int = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__UpperCAmelCase : Dict = min(len(_UpperCamelCase ) , data_args.max_eval_samples )
__UpperCAmelCase : Any = eval_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__UpperCAmelCase : Any = eval_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__UpperCAmelCase : Optional[int] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_UpperCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_UpperCamelCase : Dict ):
__UpperCAmelCase ,__UpperCAmelCase : List[str] = eval_predictions
__UpperCAmelCase : Optional[int] = np.argmax(_UpperCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__UpperCAmelCase : Tuple = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , compute_metrics=_UpperCamelCase , )
# Training
if training_args.do_train:
__UpperCAmelCase : str = None
if training_args.resume_from_checkpoint is not None:
__UpperCAmelCase : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCAmelCase : List[str] = last_checkpoint
__UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__UpperCAmelCase : Dict = train_result.metrics
__UpperCAmelCase : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
__UpperCAmelCase : List[Any] = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics("""train""" , _UpperCamelCase )
trainer.save_metrics("""train""" , _UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__UpperCAmelCase : List[str] = trainer.evaluate()
__UpperCAmelCase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics("""eval""" , _UpperCamelCase )
trainer.save_metrics("""eval""" , _UpperCamelCase )
__UpperCAmelCase : Tuple = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 139 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, """src""", """transformers""")
SCREAMING_SNAKE_CASE_:str = """
{0} = None
"""
SCREAMING_SNAKE_CASE_:str = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
SCREAMING_SNAKE_CASE_:List[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def test_find_backend( self ):
        no_backend = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
        self.assertIsNone(no_backend )
        simple_backend = find_backend(""" if not is_tokenizers_available():""" )
        self.assertEqual(simple_backend, """tokenizers""" )
        backend_with_underscore = find_backend(""" if not is_tensorflow_text_available():""" )
        self.assertEqual(backend_with_underscore, """tensorflow_text""" )
        double_backend = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
        self.assertEqual(double_backend, """sentencepiece_and_tokenizers""" )
        double_backend_with_underscore = find_backend(
            """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
        self.assertEqual(double_backend_with_underscore, """sentencepiece_and_tensorflow_text""" )
        triple_backend = find_backend(
            """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
        self.assertEqual(triple_backend, """sentencepiece_and_tokenizers_and_vision""" )
    def test_read_init( self ):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects )
        self.assertIn("""tensorflow_text""", objects )
        self.assertIn("""sentencepiece_and_tokenizers""", objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""BertModel""", objects["""torch"""] )
        self.assertIn("""TFBertModel""", objects["""tf"""] )
        self.assertIn("""FlaxBertModel""", objects["""flax"""] )
        self.assertIn("""BertModel""", objects["""torch"""] )
        self.assertIn("""TFBertTokenizer""", objects["""tensorflow_text"""] )
        self.assertIn("""convert_slow_tokenizer""", objects["""sentencepiece_and_tokenizers"""] )
    def test_create_dummy_object( self ):
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
A : Tuple = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(lowerCamelCase__, lowerCamelCase__ )
    def test_create_dummy_files( self ):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file )
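# --- Illustrative addition (not part of the original test file) ---
# A minimal sketch of the kind of parsing the tests above exercise: a line
# such as `if not is_xxx_available():` names backend "xxx", and several
# guards joined with `and` become "xxx_and_yyy". The regex is an assumption
# that mirrors check_dummies rather than an import from it.
import re
_RE_BACKEND_SKETCH = re.compile(r"is_([a-z_]*)_available")

def find_backend_sketch(line):
    if "if not" not in line:
        return None
    backends = _RE_BACKEND_SKETCH.findall(line)
    return "_and_".join(backends) if backends else None

assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert find_backend_sketch("    x = 1") is None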
| 520 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args, **kwargs ):
            pass
def hashimage(image ) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask ) -> Dict:
    """simple docstring"""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self, model, tokenizer, processor ):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self, mask_generator, examples ):
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def _lowerCAmelCase ( self ):
pass
@slow
@require_torch
    def test_small_model_pt( self ):
        image_segmenter = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""" )
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
], )
# fmt: on
@require_torch
@slow
def _lowerCAmelCase ( self ):
        model_id = """facebook/sam-vit-huge"""
        image_segmenter = pipeline("""mask-generation""", model=model_id)
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
], )
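    # Usage sketch (illustrative, not part of the suite): the two slow tests
    # above drive the high-level API roughly as
    #   pipe = pipeline("mask-generation", model="facebook/sam-vit-huge")
    #   outputs = pipe(image_url, points_per_batch=256)
    # and `pred_iou_thresh=1` in the second test filters masks by the model's
    # own predicted IoU, which is why only the top-scoring masks survive.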
| 520 | 1 |
"""simple docstring"""
def factorial(num: int) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 1_00) -> int:
    """simple docstring"""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
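# Sanity sketch (not in the original file): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.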
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 19 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
    def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        # attribute names below are assumed from the shared common-test conventions
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ):
        """simple docstring"""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3)
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, input_ids, attention_mask, lm_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    # the two flag names below are assumed from the shared test conventions
    fx_compatible = True
    test_pruning = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
pass | 322 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowercase = TypeVar('''T''')
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
"""simple docstring"""
    def __init__( self ) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
def __len__( self : Dict ):
return self.elements
def __repr__( self : str ):
return str(self.heap )
    def is_empty( self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push( self, elem: T, weight: int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min( self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key( self, elem: T, weight: int ) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up( self, elem: T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down( self, elem: T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes( self, node1_pos: int, node2_pos: int ) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
"""simple docstring"""
    def __init__( self ) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
def __repr__( self : Any ):
return str(self.connections )
def __len__( self : str ):
return self.nodes
    def add_node( self, node: T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge( self, node1: T, node2: T, weight: int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algorithm( graph: GraphUndirectedWeighted[T] ) -> tuple[dict[T, int], dict[T, T | None]]:
    # the function name is reconstructed; the class names come from the type
    # annotations below, which the obfuscation left intact
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
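# Minimal usage sketch (not from the original file):
#   graph = GraphUndirectedWeighted[str]()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algorithm(graph)
# `parent` then encodes a minimum spanning tree rooted at the first extracted
# node; with the weights above the tree keeps edges a-b (3) and b-c (10).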
| 305 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
    """simple docstring"""
    def __init__( self, label_idx=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file( self, data_dir, mode: Union[Split, str] ):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding='''utf-8''') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(''' ''')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''', ''''''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''')
            if words:
                examples.append(InputExample(guid=F'{mode}-{guid_index}', words=words, labels=labels))
        return examples
    def write_predictions_to_file( self, writer: TextIO, test_input_reader: TextIO, preds_list: List ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ''' ''' + preds_list[example_id].pop(0) + '''\n'''
                writer.write(output_line)
            else:
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''', line.split()[0])
    def get_labels( self, path: str ):
        if path:
            with open(path, '''r''') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    """simple docstring"""
    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels( self, path: str ):
        if path:
            with open(path, '''r''') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
"""simple docstring"""
    def read_examples_from_file( self, data_dir, mode: Union[Split, str] ):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding='''utf-8''') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['''form'''])
                    labels.append(token['''upos'''])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=F'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file( self, writer: TextIO, test_input_reader: TextIO, preds_list: List ):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''''''
            for token in sentence:
                out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels( self, path: str ):
        if path:
            with open(path, '''r''') as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
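# Input format sketch for the readers above (illustrative lines, not from the
# file): NER/Chunk expect CoNLL-style columns, one token per line with a blank
# line between sentences and the label column selected by `label_idx`, e.g.
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
# while POS consumes CoNLL-U files via `parse_incr`, reading each token's
# "form" and "upos" fields.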
| 305 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """data2vec-audio"""
def __init__( self, snake_case__=32, snake_case__=7_68, snake_case__=12, snake_case__=12, snake_case__=30_72, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=0.1, snake_case__=0.0, snake_case__=0.1, snake_case__=0.1, snake_case__=0.02, snake_case__=1E-5, snake_case__="gelu", snake_case__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12), snake_case__=(5, 2, 2, 2, 2, 2, 2), snake_case__=(10, 3, 3, 3, 3, 2, 2), snake_case__=False, snake_case__=16, snake_case__=19, snake_case__=5, snake_case__=0.05, snake_case__=10, snake_case__=2, snake_case__=0.0, snake_case__=10, snake_case__=0, snake_case__="sum", snake_case__=False, snake_case__=False, snake_case__=2_56, snake_case__=(5_12, 5_12, 5_12, 5_12, 15_00), snake_case__=(5, 3, 3, 1, 1), snake_case__=(1, 2, 3, 1, 1), snake_case__=5_12, snake_case__=0, snake_case__=1, snake_case__=2, snake_case__=False, snake_case__=3, snake_case__=2, snake_case__=3, snake_case__=None, **snake_case__, ) -> str:
"""simple docstring"""
super().__init__(**snake_case__, pad_token_id=snake_case__, bos_token_id=snake_case__, eos_token_id=snake_case__ )
lowercase_ : List[Any] = hidden_size
lowercase_ : int = feat_extract_activation
lowercase_ : str = list(snake_case__ )
lowercase_ : Optional[Any] = list(snake_case__ )
lowercase_ : str = list(snake_case__ )
lowercase_ : int = conv_bias
lowercase_ : int = num_conv_pos_embeddings
lowercase_ : Optional[int] = num_conv_pos_embedding_groups
lowercase_ : List[str] = conv_pos_kernel_size
lowercase_ : List[str] = len(self.conv_dim )
lowercase_ : Dict = num_hidden_layers
lowercase_ : Tuple = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : str = num_attention_heads
lowercase_ : Optional[int] = hidden_dropout
lowercase_ : List[Any] = attention_dropout
lowercase_ : Any = activation_dropout
lowercase_ : Union[str, Any] = feat_proj_dropout
lowercase_ : int = final_dropout
lowercase_ : Union[str, Any] = layerdrop
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : Optional[int] = initializer_range
lowercase_ : Any = vocab_size
lowercase_ : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ : List[str] = mask_time_prob
lowercase_ : Optional[int] = mask_time_length
lowercase_ : List[Any] = mask_time_min_masks
lowercase_ : List[Any] = mask_feature_prob
lowercase_ : Tuple = mask_feature_length
lowercase_ : int = mask_feature_min_masks
# ctc loss
lowercase_ : int = ctc_loss_reduction
lowercase_ : Tuple = ctc_zero_infinity
# adapter
lowercase_ : Union[str, Any] = add_adapter
lowercase_ : int = adapter_kernel_size
lowercase_ : Optional[Any] = adapter_stride
lowercase_ : Union[str, Any] = num_adapter_layers
lowercase_ : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase_ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase_ : Optional[Any] = list(snake_case__ )
lowercase_ : Any = list(snake_case__ )
lowercase_ : Tuple = list(snake_case__ )
lowercase_ : int = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ) -> int:
"""simple docstring"""
return math.prod(self.conv_stride ) | 458 |
def harmonic_series( n_term: str ) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"""1/{temp + 1}""" if series else """1""")
    return series
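# Example (sketch): harmonic_series("3") -> ['1', '1/2', '1/3'].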
if __name__ == "__main__":
UpperCAmelCase_ = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term)) | 458 | 1 |
def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
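# Example (sketch): odd_even_transposition([3, 1, 2]) == [1, 2, 3]. The n outer
# passes alternate over odd- and even-indexed pairs, which is what makes this
# fixed compare-exchange network sort any array of length n.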
if __name__ == "__main__":
_lowercase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 242 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
_lowercase = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
_lowercase = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
_lowercase = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = FunnelTokenizer
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = 2
def __init__( self : Tuple , __a : Optional[Any]=None , __a : List[Any]=None , __a : str=True , __a : Union[str, Any]="<unk>" , __a : List[Any]="<sep>" , __a : Any="<pad>" , __a : int="<cls>" , __a : Tuple="<mask>" , __a : Any="<s>" , __a : Dict="</s>" , __a : Optional[Any]=True , __a : Union[str, Any]=True , __a : Optional[Any]=None , __a : Optional[Any]="##" , **__a : Optional[int] , ):
'''simple docstring'''
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , bos_token=__a , eos_token=__a , clean_text=__a , tokenize_chinese_chars=__a , strip_accents=__a , wordpieces_prefix=__a , **__a , )
lowerCamelCase__: int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __a ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __a ) != tokenize_chinese_chars
):
lowerCamelCase__: List[str] = getattr(__a , normalizer_state.pop("""type""" ) )
lowerCamelCase__: Union[str, Any] = do_lower_case
lowerCamelCase__: int = strip_accents
lowerCamelCase__: Tuple = tokenize_chinese_chars
lowerCamelCase__: Dict = normalizer_class(**__a )
lowerCamelCase__: Dict = do_lower_case
def lowerCamelCase_ ( self : List[str] , __a : Dict , __a : List[Any]=None ):
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__: Tuple = [self.sep_token_id]
lowerCamelCase__: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : Optional[Any] , __a : str , __a : Optional[str] = None ):
'''simple docstring'''
lowerCamelCase__: Any = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
| 242 | 1 |
"""simple docstring"""
import sys
def matrix_chain_order( array ):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution( optimal_solution, i, j ):
    if i == j:
        print('''A''' + str(i), end=''' ''')
    else:
        print('''(''', end=''' ''')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(''')''', end=''' ''')
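# Worked example (sketch): array = [10, 20, 30] encodes two matrices, A1 of
# shape 10x20 and A2 of shape 20x30; the only parenthesization costs
# 10 * 20 * 30 = 6000 scalar multiplications, so matrix_chain_order([10, 20, 30])
# yields matrix[1][2] == 6000.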
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('''No. of Operation required: ''' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 227 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url).content
if __name__ == "__main__":
UpperCamelCase__ = input('''Enter Video/IGTV url: ''').strip()
UpperCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 227 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase =logging.get_logger(__name__)
lowercase ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase ={
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
lowercase ={'allegro/herbert-base-cased': 514}
lowercase ={}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, )
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0, token_ids_1=None, already_has_special_tokens=False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self, save_directory, filename_prefix=None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
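# Special-token layout produced by the methods above (HerBERT follows the
# XLM/BERT scheme):
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s> B </s>
# with token_type_ids 0 over the first segment and 1 over the second.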
| 716 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig( PretrainedConfig ):
    model_type = "dpr"
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
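# Usage sketch (illustrative): a DPR encoder config whose pooled output is
# followed by a 128-d projection head:
#   config = DPRConfig(projection_dim=128)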
| 331 | 0 |
from __future__ import annotations
END = '#'
class Trie:
    """simple docstring"""
    def __init__( self ) -> None:
        self._trie: dict = {}
    def insert_word( self, text: str ) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word( self, prefix: str ) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)
    def _elements( self, d: dict ) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie( string: str ) -> tuple:
    '''simple docstring'''
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie('''de'''))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
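# Example (sketch): autocomplete_using_trie("de") returns every stored word with
# prefix "de", each carrying the sentinel space appended by _elements:
#   ('depart ', 'detergent ', 'deer ', 'deal ')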
| 97 |
def is_sum_subset( arr: list[int], required_sum: int ) -> bool:
    """simple docstring"""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
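# Example (sketch): is_sum_subset([2, 4, 6, 8], 5) is False (all values are
# even), while is_sum_subset([2, 4, 6, 8], 14) is True (2 + 4 + 8).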
if __name__ == "__main__":
import doctest
doctest.testmod()
| 563 | 0 |
def _modexpt( base: int, exponent: int, modulo_value: int ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution( base: int = 1_777, height: int = 1_855, digits: int = 8 ) -> int:
    # `height` names the tetration height; the obfuscated original carried no
    # parameter name, so this one is an inference
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
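# Sanity sketch: solution() keeps only the last `digits` digits of the
# hyperexponentiation 1777^^1855 by reducing every step mod 10**digits.
# Small check: _modexpt(3, 4, 10**8) == 81.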
if __name__ == "__main__":
print(F'''{solution() = }''')
| 198 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests( PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def UpperCamelCase_ ( self : Any) -> Any:
"""simple docstring"""
torch.manual_seed(0)
_snake_case : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCAmelCase , )
_snake_case : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0)
_snake_case : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
_snake_case : Any = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_snake_case : Any = ClapTextModelWithProjection(lowerCAmelCase)
_snake_case : List[str] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77)
_snake_case : List[Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCAmelCase , )
_snake_case : Tuple = SpeechTaHifiGan(lowerCAmelCase)
_snake_case : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=0) -> int:
"""simple docstring"""
if str(lowerCAmelCase).startswith("""mps"""):
_snake_case : List[str] = torch.manual_seed(lowerCAmelCase)
else:
_snake_case : Optional[Any] = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)
_snake_case : Tuple = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def UpperCamelCase_ ( self : List[Any]) -> str:
"""simple docstring"""
_snake_case : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : Dict = self.get_dummy_components()
_snake_case : Optional[Any] = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : Optional[int] = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : str = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : Tuple = audioldm_pipe(**lowerCAmelCase)
_snake_case : int = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase) == 256
_snake_case : Union[str, Any] = audio[:10]
_snake_case : Union[str, Any] = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def UpperCamelCase_ ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.get_dummy_components()
_snake_case : Any = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : int = audioldm_pipe.to(lowerCAmelCase)
_snake_case : str = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : str = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : List[Any] = 3 * [inputs["""prompt"""]]
# forward
_snake_case : str = audioldm_pipe(**lowerCAmelCase)
_snake_case : Dict = output.audios[0]
_snake_case : Dict = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : List[str] = 3 * [inputs.pop("""prompt""")]
_snake_case : List[Any] = audioldm_pipe.tokenizer(
lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors="""pt""" , )
_snake_case : List[Any] = text_inputs["""input_ids"""].to(lowerCAmelCase)
_snake_case : str = audioldm_pipe.text_encoder(
lowerCAmelCase , )
_snake_case : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_snake_case : Tuple = F.normalize(lowerCAmelCase , dim=-1)
_snake_case : List[Any] = prompt_embeds
# forward
_snake_case : Union[str, Any] = audioldm_pipe(**lowerCAmelCase)
_snake_case : str = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def UpperCamelCase_ ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : Optional[Any] = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : Tuple = audioldm_pipe.to(lowerCAmelCase)
_snake_case : List[Any] = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : Optional[int] = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : Optional[Any] = 3 * ["""this is a negative prompt"""]
_snake_case : int = negative_prompt
_snake_case : Dict = 3 * [inputs["""prompt"""]]
# forward
_snake_case : Dict = audioldm_pipe(**lowerCAmelCase)
_snake_case : Union[str, Any] = output.audios[0]
_snake_case : str = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : Union[str, Any] = 3 * [inputs.pop("""prompt""")]
_snake_case : Tuple = []
for p in [prompt, negative_prompt]:
_snake_case : Optional[Any] = audioldm_pipe.tokenizer(
lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors="""pt""" , )
_snake_case : Any = text_inputs["""input_ids"""].to(lowerCAmelCase)
_snake_case : int = audioldm_pipe.text_encoder(
lowerCAmelCase , )
_snake_case : Optional[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_snake_case : str = F.normalize(lowerCAmelCase , dim=-1)
embeds.append(lowerCAmelCase)
_snake_case , _snake_case : int = embeds
# forward
_snake_case : List[str] = audioldm_pipe(**lowerCAmelCase)
_snake_case : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def UpperCamelCase_ ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase)
_snake_case : Tuple = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : str = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : List[str] = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : List[Any] = """egg cracking"""
_snake_case : Optional[Any] = audioldm_pipe(**lowerCAmelCase , negative_prompt=lowerCAmelCase)
_snake_case : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase) == 256
_snake_case : int = audio[:10]
_snake_case : Any = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def UpperCamelCase_ ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : Any = self.get_dummy_components()
_snake_case : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase)
_snake_case : Tuple = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : Union[str, Any] = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_snake_case : List[Any] = audioldm_pipe(lowerCAmelCase , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_snake_case : Optional[Any] = 2
_snake_case : Optional[int] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_snake_case : List[Any] = 2
_snake_case : Tuple = audioldm_pipe(lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_snake_case : Dict = 2
_snake_case : int = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCamelCase_ ( self : Optional[int]) -> int:
"""simple docstring"""
_snake_case : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : Any = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : Any = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : str = audioldm_pipe.vocoder.config.sampling_rate
_snake_case : List[str] = self.get_dummy_inputs(lowerCAmelCase)
_snake_case : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 , **lowerCAmelCase)
_snake_case : str = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase) / vocoder_sampling_rate == 0.016
_snake_case : int = audioldm_pipe(audio_length_in_s=0.032 , **lowerCAmelCase)
_snake_case : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase) / vocoder_sampling_rate == 0.032
def UpperCamelCase_ ( self : int) -> List[Any]:
"""simple docstring"""
_snake_case : str = self.get_dummy_components()
_snake_case : List[str] = AudioLDMPipeline(**lowerCAmelCase)
_snake_case : str = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : List[str] = ["""hey"""]
_snake_case : int = audioldm_pipe(lowerCAmelCase , num_inference_steps=1)
_snake_case : Optional[int] = output.audios.shape
assert audio_shape == (1, 256)
_snake_case : Dict = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_snake_case : str = SpeechTaHifiGan(lowerCAmelCase).to(lowerCAmelCase)
_snake_case : List[str] = audioldm_pipe(lowerCAmelCase , num_inference_steps=1)
_snake_case : Dict = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCamelCase_ ( self : Dict) -> Dict:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase)
def UpperCamelCase_ ( self : str) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCAmelCase)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase_ ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase)
@slow
class AudioLDMPipelineSlowTests( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]="cpu" , lowerCAmelCase : Any=torch.floataa , lowerCAmelCase : Optional[Any]=0) -> Tuple:
"""simple docstring"""
_snake_case : Any = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)
_snake_case : Optional[Any] = np.random.RandomState(lowerCAmelCase).standard_normal((1, 8, 128, 16))
_snake_case : List[str] = torch.from_numpy(lowerCAmelCase).to(device=lowerCAmelCase , dtype=lowerCAmelCase)
_snake_case : Optional[int] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def UpperCamelCase_ ( self : Any) -> List[str]:
"""simple docstring"""
_snake_case : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""")
_snake_case : Optional[int] = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : Tuple = self.get_inputs(lowerCAmelCase)
_snake_case : int = 25
_snake_case : Optional[Any] = audioldm_pipe(**lowerCAmelCase).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase) == 8_1920
_snake_case : Any = audio[7_7230:7_7240]
_snake_case : str = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315])
_snake_case : List[str] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1E-2
def UpperCamelCase_ ( self : Dict) -> List[Any]:
"""simple docstring"""
_snake_case : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""")
_snake_case : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
_snake_case : Optional[int] = audioldm_pipe.to(lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase)
_snake_case : Union[str, Any] = self.get_inputs(lowerCAmelCase)
_snake_case : List[str] = audioldm_pipe(**lowerCAmelCase).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase) == 8_1920
_snake_case : Optional[Any] = audio[2_7780:2_7790]
_snake_case : int = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212])
_snake_case : List[Any] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3E-2
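    # Note: both slow checks above decode 81 920 samples; at the 16 kHz
    # SpeechT5 HiFi-GAN sampling rate that is 81920 / 16000 = 5.12 s of audio.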
| 198 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.txt'''}
__magic_name__ = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__magic_name__ = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
def A_ ( self , lowerCamelCase ):
return self._id_to_token.get(lowerCamelCase , self.unk_token )
def A_ ( self , lowerCamelCase ):
return self._token_to_id.get(lowerCamelCase , self._token_to_id.get(self.unk_token ) )
def A_ ( self , lowerCamelCase , **lowerCamelCase ):
return text.split()
def A_ ( self , lowerCamelCase=False ):
return len(self._id_to_token )
def A_ ( self ):
return {token: i for i, token in enumerate(self.all_tokens )}
def A_ ( self , lowerCamelCase ):
return self._token_to_id.get(lowerCamelCase , self._token_to_id.get(self.unk_token ) )
def A_ ( self , lowerCamelCase ):
return self._id_to_token.get(lowerCamelCase , self.unk_token )
def A_ ( self , lowerCamelCase , lowerCamelCase = None ):
snake_case__ = [self.cls_token_id]
snake_case__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
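# ESM inputs are therefore formatted as <cls> seq <eos> for single sequences,
# and <cls> seq_a <eos> seq_b <eos> for pairs (the "sep" here is just the EOS id).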
def A_ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
snake_case__ = [1] + ([0] * len(lowerCamelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase ) + [1]
return mask
def A_ ( self , lowerCamelCase , lowerCamelCase ):
snake_case__ = os.path.join(lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(lowerCamelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A_ ( self ):
return self.get_vocab_size(with_added_tokens=lowerCamelCase )
def A_ ( self , lowerCamelCase , lowerCamelCase = False ):
return super()._add_tokens(lowerCamelCase , special_tokens=lowerCamelCase )
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def A_ ( self ):
snake_case__ = "ZinengTang/tvlt-base"
snake_case__ = tempfile.mkdtemp()
def A_ ( self , **lowerCamelCase ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase )
def A_ ( self , **lowerCamelCase ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase )
def A_ ( self ):
shutil.rmtree(self.tmpdirname )
def A_ ( self ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def A_ ( self ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(lowerCamelCase , return_tensors="np" )
snake_case__ = processor(audio=lowerCamelCase , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(lowerCamelCase , return_tensors="np" )
snake_case__ = processor(images=lowerCamelCase , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def A_ ( self ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
_A : str = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self: str , snake_case: int , snake_case: int , snake_case: Optional[int] = None , snake_case: int = 50_257 , snake_case: int = 1_024 , snake_case: int = 768 , snake_case: int = 12 , snake_case: int = 12 , snake_case: Optional[int] = None , snake_case: str = "gelu_new" , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 1E-5 , snake_case: float = 0.0_2 , snake_case: bool = True , snake_case: bool = True , snake_case: bool = False , snake_case: bool = False , ) -> Optional[Any]:
super().__init__()
snake_case_ :Any = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
snake_case_ :List[str] = prefix_inner_dim
snake_case_ :Optional[Any] = prefix_hidden_dim
snake_case_ :Optional[Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case_ :str = (
nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case_ :Dict = GPTaConfig(
vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , )
snake_case_ :Tuple = GPTaLMHeadModel(snake_case )
def lowerCAmelCase_ ( self: Tuple , snake_case: torch.Tensor , snake_case: torch.Tensor , snake_case: Optional[torch.Tensor] = None , snake_case: Optional[torch.Tensor] = None , ) -> Union[str, Any]:
snake_case_ :List[str] = self.transformer.transformer.wte(snake_case )
snake_case_ :Union[str, Any] = self.encode_prefix(snake_case )
snake_case_ :Optional[int] = self.decode_prefix(snake_case )
snake_case_ :int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
snake_case_ :Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
snake_case_ :Dict = torch.cat((dummy_token, input_ids) , dim=1 )
snake_case_ :Dict = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: torch.device ) -> torch.Tensor:
return torch.zeros(snake_case , self.prefix_length , dtype=torch.int64 , device=snake_case )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: str ) -> Union[str, Any]:
return self.encode_prefix(snake_case )
@torch.no_grad()
def lowerCAmelCase_ ( self: str , snake_case: Dict , snake_case: Optional[int] , snake_case: List[str] ) -> Union[str, Any]:
snake_case_ :List[Any] = torch.split(snake_case , 1 , dim=0 )
snake_case_ :str = []
snake_case_ :List[Any] = []
for feature in features:
snake_case_ :str = self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature
# Only support beam search for now
snake_case_, snake_case_ :List[Any] = self.generate_beam(
input_embeds=snake_case , device=snake_case , eos_token_id=snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case_ :Dict = torch.stack(snake_case )
snake_case_ :Any = torch.stack(snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCAmelCase_ ( self: Dict , snake_case: int=None , snake_case: Tuple=None , snake_case: str=None , snake_case: int = 5 , snake_case: int = 67 , snake_case: float = 1.0 , snake_case: Optional[int] = None , ) -> Optional[Any]:
snake_case_ :Tuple = eos_token_id
snake_case_ :Dict = None
snake_case_ :Dict = None
snake_case_ :Any = torch.ones(snake_case , device=snake_case , dtype=torch.int )
snake_case_ :Optional[int] = torch.zeros(snake_case , device=snake_case , dtype=torch.bool )
if input_embeds is not None:
snake_case_ :Tuple = input_embeds
else:
snake_case_ :Optional[int] = self.transformer.transformer.wte(snake_case )
for i in range(snake_case ):
snake_case_ :str = self.transformer(inputs_embeds=snake_case )
snake_case_ :Optional[Any] = outputs.logits
snake_case_ :Any = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case_ :Any = logits.softmax(-1 ).log()
if scores is None:
snake_case_, snake_case_ :Optional[Any] = logits.topk(snake_case , -1 )
snake_case_ :Tuple = generated.expand(snake_case , *generated.shape[1:] )
snake_case_, snake_case_ :Union[str, Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
snake_case_ :Optional[int] = next_tokens
else:
snake_case_ :List[str] = tokens.expand(snake_case , *tokens.shape[1:] )
snake_case_ :Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
snake_case_ :Dict = -float(np.inf )
snake_case_ :str = 0
snake_case_ :List[str] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case_ :List[str] = scores_sum / seq_lengths[:, None]
snake_case_, snake_case_ :List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 )
snake_case_ :Dict = next_tokens // scores_sum.shape[1]
snake_case_ :List[str] = seq_lengths[next_tokens_source]
snake_case_ :List[str] = next_tokens % scores_sum.shape[1]
snake_case_ :Optional[int] = next_tokens.unsqueeze(1 )
snake_case_ :List[str] = tokens[next_tokens_source]
snake_case_ :Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
snake_case_ :Any = generated[next_tokens_source]
snake_case_ :List[Any] = scores_sum_average * seq_lengths
snake_case_ :int = is_stopped[next_tokens_source]
snake_case_ :Any = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
snake_case_ :Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 )
snake_case_ :Any = is_stopped + next_tokens.eq(snake_case ).squeeze()
if is_stopped.all():
break
snake_case_ :Optional[int] = scores / seq_lengths
snake_case_ :Tuple = scores.argsort(descending=snake_case )
# tokens tensors are already padded to max_seq_length
snake_case_ :int = [tokens[i] for i in order]
snake_case_ :List[str] = torch.stack(snake_case , dim=0 )
snake_case_ :List[str] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
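# generate_beam above keeps beam_size partial sequences, ranks them by their
# length-normalized cumulative log-probability, and stops early once every
# beam has emitted the stop token.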
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : int = """time_series_transformer"""
_A : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self: Dict , snake_case: Optional[int] = None , snake_case: Optional[int] = None , snake_case: str = "student_t" , snake_case: str = "nll" , snake_case: int = 1 , snake_case: List[int] = [1, 2, 3, 4, 5, 6, 7] , snake_case: Optional[Union[str, bool]] = "mean" , snake_case: int = 0 , snake_case: int = 0 , snake_case: int = 0 , snake_case: int = 0 , snake_case: Optional[List[int]] = None , snake_case: Optional[List[int]] = None , snake_case: int = 32 , snake_case: int = 32 , snake_case: int = 2 , snake_case: int = 2 , snake_case: int = 2 , snake_case: int = 2 , snake_case: bool = True , snake_case: str = "gelu" , snake_case: int = 64 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: int = 100 , snake_case: float = 0.0_2 , snake_case: List[str]=True , **snake_case: List[str] , ) -> Union[str, Any]:
# time series specific configuration
snake_case_ :Any = prediction_length
snake_case_ :Any = context_length or prediction_length
snake_case_ :int = distribution_output
snake_case_ :Any = loss
snake_case_ :List[Any] = input_size
snake_case_ :Any = num_time_features
snake_case_ :Any = lags_sequence
snake_case_ :Any = scaling
snake_case_ :Any = num_dynamic_real_features
snake_case_ :List[str] = num_static_real_features
snake_case_ :List[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
snake_case_ :int = cardinality
else:
snake_case_ :List[str] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
snake_case_ :int = embedding_dimension
else:
snake_case_ :int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ :Dict = num_parallel_samples
# Transformer architecture configuration
snake_case_ :Any = input_size * len(snake_case ) + self._number_of_features
snake_case_ :Dict = d_model
snake_case_ :Optional[Any] = encoder_attention_heads
snake_case_ :Tuple = decoder_attention_heads
snake_case_ :Any = encoder_ffn_dim
snake_case_ :Any = decoder_ffn_dim
snake_case_ :Tuple = encoder_layers
snake_case_ :int = decoder_layers
snake_case_ :Tuple = dropout
snake_case_ :Any = attention_dropout
snake_case_ :List[str] = activation_dropout
snake_case_ :Any = encoder_layerdrop
snake_case_ :str = decoder_layerdrop
snake_case_ :Union[str, Any] = activation_function
snake_case_ :List[str] = init_std
snake_case_ :int = use_cache
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def _a (self ):
'''simple docstring'''
lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
lowerCamelCase = AutoTokenizer.from_pretrained("google/mt5-small" )
lowerCamelCase = tokenizer("Hello there" , return_tensors="np" ).input_ids
lowerCamelCase = tokenizer("Hi I am" , return_tensors="np" ).input_ids
lowerCamelCase = shift_tokens_right(__a , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase = model(__a , decoder_input_ids=__a ).logits
lowerCamelCase = optax.softmax_cross_entropy(__a , onehot(__a , logits.shape[-1] ) ).mean()
lowerCamelCase = -(labels.shape[-1] * loss.item())
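# optax's softmax_cross_entropy is averaged over tokens above; multiplying by
# the sequence length and negating recovers the summed log-likelihood that the
# reference EXPECTED_SCORE below was computed with.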
lowerCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
from __future__ import annotations
from math import pow, sqrt
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCAmelCase__ , 2 ) + pow(UpperCAmelCase__ , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
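# A usage sketch (an added illustration with assumed values, and assuming the
# obfuscated parameters map to (resistance, reactance, impedance) in order):
# with resistance = 3 ohm and reactance = 4 ohm, the missing quantity follows
# from |Z| = sqrt(R**2 + X**2) = sqrt(9 + 16) = 5, so
#   __lowercase(3, 4, 0)  ->  {"impedance": 5.0}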
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , ):
lowercase = size if size is not None else {'height': 18, 'width': 18}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = apply_ocr
def SCREAMING_SNAKE_CASE__ ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A_ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LayoutLMvaImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'apply_ocr' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , snake_case )
self.assertIsInstance(encoding.boxes , snake_case )
# Test batched
lowercase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# with apply_OCR = True
lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
lowercase = image_processing(snake_case , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowercase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case )
self.assertListEqual(encoding.boxes , snake_case )
# with apply_OCR = False
lowercase = LayoutLMvaImageProcessor(apply_ocr=snake_case )
lowercase = image_processing(snake_case , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
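# Standard transformers lazy-import layout: under TYPE_CHECKING the real symbols
# are imported for static analysis, while at runtime the module object is
# replaced by a _LazyModule that resolves each attribute on first access.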
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
if isinstance(__lowerCamelCase , torch.Tensor ):
return image
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
__snake_case : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
__snake_case : Tuple = np.concatenate(__lowerCamelCase , axis=0 )
__snake_case : Tuple = np.array(__lowerCamelCase ).astype(np.float32 ) / 255.0
__snake_case : List[str] = image.transpose(0 , 3 , 1 , 2 )
__snake_case : List[str] = 2.0 * image - 1.0
__snake_case : Union[str, Any] = torch.from_numpy(__lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case : int = torch.cat(__lowerCamelCase , dim=0 )
return image
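# The PIL branch above maps uint8 pixels to a float tensor in [-1, 1]:
# resize, stack to NHWC, scale by 1/255, transpose to NCHW, then 2*x - 1.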
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0.99_95 ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(__lowerCamelCase , np.ndarray ):
__snake_case : Any = True
__snake_case : Any = va.device
__snake_case : str = va.cpu().numpy()
__snake_case : Optional[int] = va.cpu().numpy()
__snake_case : List[Any] = np.sum(va * va / (np.linalg.norm(__lowerCamelCase ) * np.linalg.norm(__lowerCamelCase )) )
if np.abs(__lowerCamelCase ) > DOT_THRESHOLD:
__snake_case : List[Any] = (1 - t) * va + t * va
else:
__snake_case : Any = np.arccos(__lowerCamelCase )
__snake_case : Optional[Any] = np.sin(__lowerCamelCase )
__snake_case : int = theta_a * t
__snake_case : List[Any] = np.sin(__lowerCamelCase )
__snake_case : str = np.sin(theta_a - theta_t ) / sin_theta_a
__snake_case : Optional[Any] = sin_theta_t / sin_theta_a
__snake_case : Dict = sa * va + sa * va
if inputs_are_torch:
__snake_case : List[Any] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
return va
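# Background for slerp above: for vectors v0, v1 separated by angle theta,
#   slerp(t; v0, v1) = sin((1 - t) * theta) / sin(theta) * v0
#                    + sin(t * theta) / sin(theta) * v1,
# and when |cos(theta)| exceeds DOT_THRESHOLD the vectors are close enough to
# parallel that the code falls back to plain linear interpolation.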
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Any = F.normalize(__lowerCamelCase , dim=-1 )
__snake_case : Optional[Any] = F.normalize(__lowerCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
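# For unit vectors, |x - y| / 2 == sin(theta / 2), so the expression above is
# 2 * (theta / 2)**2 = theta**2 / 2 -- a squared great-circle distance between
# the normalized CLIP embeddings.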
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
for param in model.parameters():
__snake_case : List[str] = value
class _A ( _lowerCAmelCase ):
def __init__( self : Optional[Any] , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __magic_name__ : CLIPFeatureExtractor , __magic_name__ : Any=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Tuple=None , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=__magic_name__ , text_encoder=__magic_name__ , clip_model=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , coca_model=__magic_name__ , coca_tokenizer=__magic_name__ , coca_transform=__magic_name__ , )
__snake_case : str = (
feature_extractor.size
if isinstance(feature_extractor.size , __magic_name__ )
else feature_extractor.size["shortest_edge"]
)
__snake_case : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __magic_name__ )
set_requires_grad(self.clip_model , __magic_name__ )
def lowercase__ ( self : Tuple , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.vae , __magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.vae , __magic_name__ )
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , __magic_name__ )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.unet , __magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = min(int(num_inference_steps * strength ) , __magic_name__ )
__snake_case : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
__snake_case : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(__magic_name__ , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(__magic_name__ )}''' )
__snake_case : Optional[int] = image.to(device=__magic_name__ , dtype=__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Any = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__magic_name__ )
]
__snake_case : List[Any] = torch.cat(__magic_name__ , dim=0 )
else:
__snake_case : Any = self.vae.encode(__magic_name__ ).latent_dist.sample(__magic_name__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__snake_case : Union[str, Any] = 0.18215 * init_latents
__snake_case : Optional[Any] = init_latents.repeat_interleave(__magic_name__ , dim=0 )
__snake_case : Union[str, Any] = randn_tensor(init_latents.shape , generator=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ )
# get latents
__snake_case : List[Any] = self.scheduler.add_noise(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Tuple = init_latents
return latents
def lowercase__ ( self : Optional[int] , __magic_name__ : Dict ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self.coca_transform(__magic_name__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__snake_case : Union[str, Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__snake_case : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.feature_extractor.preprocess(__magic_name__ )
__snake_case : Optional[int] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
__snake_case : Tuple = self.clip_model.get_image_features(__magic_name__ )
__snake_case : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__magic_name__ )
__snake_case : List[str] = image_embeddings_clip.repeat_interleave(__magic_name__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[Any] = latents.detach().requires_grad_()
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Optional[Any] = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__snake_case : Tuple = self.scheduler.alphas_cumprod[timestep]
__snake_case : Dict = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
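# i.e. x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_bar_t)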
__snake_case : Union[str, Any] = torch.sqrt(__magic_name__ )
__snake_case : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __magic_name__ ):
__snake_case : Any = self.scheduler.sigmas[index]
__snake_case : Any = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__snake_case : Tuple = 1 / 0.18215 * sample
__snake_case : int = self.vae.decode(__magic_name__ ).sample
__snake_case : str = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : str = transforms.Resize(self.feature_extractor_size )(__magic_name__ )
__snake_case : List[str] = self.normalize(__magic_name__ ).to(latents.dtype )
__snake_case : List[str] = self.clip_model.get_image_features(__magic_name__ )
__snake_case : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__magic_name__ )
__snake_case : str = spherical_dist_loss(__magic_name__ , __magic_name__ ).mean() * clip_guidance_scale
__snake_case : Union[str, Any] = -torch.autograd.grad(__magic_name__ , __magic_name__ )[0]
if isinstance(self.scheduler , __magic_name__ ):
__snake_case : str = latents.detach() + grads * (sigma**2)
__snake_case : List[Any] = noise_pred_original
else:
__snake_case : str = noise_pred_original - torch.sqrt(__magic_name__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : List[str] , __magic_name__ : Union[torch.FloatTensor, PIL.Image.Image] , __magic_name__ : Union[torch.FloatTensor, PIL.Image.Image] , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[int] = 5_12 , __magic_name__ : Optional[int] = 5_12 , __magic_name__ : float = 0.6 , __magic_name__ : Optional[int] = 50 , __magic_name__ : Optional[float] = 7.5 , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[float] = 1_00 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : float = 0.8 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , ) -> Any:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(__magic_name__ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__magic_name__ , torch.Generator ) and batch_size > 1:
__snake_case : int = [generator] + [None] * (batch_size - 1)
__snake_case : Tuple = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__snake_case : Dict = [x[0] for x in coca_is_none if x[1]]
__snake_case : List[Any] = ", ".join(__magic_name__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__magic_name__ ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case : List[Any] = self.get_image_description(__magic_name__ )
if style_prompt is None:
if len(__magic_name__ ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case : Union[str, Any] = self.get_image_description(__magic_name__ )
# get prompt text embeddings for content and style
__snake_case : List[Any] = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Union[str, Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__snake_case : Tuple = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__snake_case : Optional[Any] = slerp(__magic_name__ , __magic_name__ , __magic_name__ )
# duplicate text embeddings for each generation per prompt
__snake_case : str = text_embeddings.repeat_interleave(__magic_name__ , dim=0 )
# set timesteps
__snake_case : Tuple = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__snake_case : str = {}
if accepts_offset:
__snake_case : Optional[Any] = 1
self.scheduler.set_timesteps(__magic_name__ , **__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__snake_case : str = self.get_timesteps(__magic_name__ , __magic_name__ , self.device )
__snake_case : Dict = timesteps[:1].repeat(__magic_name__ )
# Preprocess image
__snake_case : Dict = preprocess(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : List[Any] = self.prepare_latents(
__magic_name__ , __magic_name__ , __magic_name__ , text_embeddings.dtype , self.device , __magic_name__ )
__snake_case : Dict = preprocess(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Dict = self.prepare_latents(
__magic_name__ , __magic_name__ , __magic_name__ , text_embeddings.dtype , self.device , __magic_name__ )
__snake_case : Optional[int] = slerp(__magic_name__ , __magic_name__ , __magic_name__ )
if clip_guidance_scale > 0:
__snake_case : List[str] = self.get_clip_image_embeddings(__magic_name__ , __magic_name__ )
__snake_case : Optional[int] = self.get_clip_image_embeddings(__magic_name__ , __magic_name__ )
__snake_case : str = slerp(
__magic_name__ , __magic_name__ , __magic_name__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : Any = content_text_input.input_ids.shape[-1]
__snake_case : Tuple = self.tokenizer([""""""] , padding="""max_length""" , max_length=__magic_name__ , return_tensors="""pt""" )
__snake_case : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__snake_case : Dict = uncond_embeddings.repeat_interleave(__magic_name__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__snake_case : Union[str, Any] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : Optional[Any] = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Optional[int] = {}
if accepts_eta:
__snake_case : List[str] = eta
# check if the scheduler accepts generator
__snake_case : Optional[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__snake_case : Any = generator
with self.progress_bar(total=__magic_name__ ):
for i, t in enumerate(__magic_name__ ):
# expand the latents if we are doing classifier free guidance
__snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Union[str, Any] = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : List[Any] = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__snake_case : Optional[Any] = noise_pred.chunk(2 )
__snake_case : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__snake_case : Dict = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__snake_case : Tuple = self.cond_fn(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Tuple = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[int] = self.vae.decode(__magic_name__ ).sample
__snake_case : int = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : str = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : str = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "van"
def __init__( self : Optional[int] , lowerCamelCase : Any=224 , lowerCamelCase : str=3 , lowerCamelCase : Any=[7, 3, 3, 3] , lowerCamelCase : Dict=[4, 2, 2, 2] , lowerCamelCase : List[Any]=[64, 128, 320, 512] , lowerCamelCase : str=[3, 3, 12, 3] , lowerCamelCase : Dict=[8, 8, 4, 4] , lowerCamelCase : Any="gelu" , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Tuple=1E-6 , lowerCamelCase : Optional[int]=1E-2 , lowerCamelCase : int=0.0 , lowerCamelCase : Optional[Any]=0.0 , **lowerCamelCase : Optional[int] , ) -> int:
super().__init__(**lowerCamelCase )
__snake_case : Union[str, Any] = image_size
__snake_case : Any = num_channels
__snake_case : Any = patch_sizes
__snake_case : List[Any] = strides
__snake_case : str = hidden_sizes
__snake_case : Dict = depths
__snake_case : Optional[int] = mlp_ratios
__snake_case : Dict = hidden_act
__snake_case : Union[str, Any] = initializer_range
__snake_case : List[str] = layer_norm_eps
__snake_case : Optional[int] = layer_scale_init_value
__snake_case : List[Any] = drop_path_rate
__snake_case : int = dropout_rate
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class a_ :
UpperCamelCase_ : str = field(
metadata={"help": "The output directory where the model will be written."} , )
UpperCamelCase_ : str = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} , )
UpperCamelCase_ : str = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=__UpperCamelCase , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
UpperCamelCase_ : Optional[str] = field(
default=__UpperCamelCase , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser((ModelArguments,) )
((lowerCAmelCase__) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowerCamelCase__ , decoder_config=lowerCamelCase__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowerCAmelCase__ = decoder_config.decoder_start_token_id
lowerCAmelCase__ = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowerCAmelCase__ = decoder_config.bos_token_id
if pad_token_id is None:
lowerCAmelCase__ = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowerCAmelCase__ = decoder_config.eos_token_id
lowerCAmelCase__ = decoder_start_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
"""simple docstring"""
import os
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) )
lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" )
with open(lowerCamelCase__ ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = []
for line in triangle:
lowerCAmelCase__ = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(lowerCamelCase__ ) )
a.append(lowerCamelCase__ )
for i in range(1 , len(lowerCamelCase__ ) ):
for j in range(len(a[i] ) ):
lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ )
return max(a[-1] )
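# Classic maximum-path-sum dynamic programming (the Project Euler triangle
# problems): each cell adds the larger of its two parents from the row above,
# so after the loops the last row holds the best path total ending at each
# position, and max(a[-1]) is the answer.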
if __name__ == "__main__":
print(solution())
"""Shortest remaining time first (preemptive SJF) scheduling."""
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under SRTF scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turn-around time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and the average turn-around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
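# Worked example (illustrative, bypassing the interactive input() prompts above):
# arrival times [0, 1, 2] and burst times [3, 1, 2] give
#   calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)     -> [1, 0, 2]
#   calculate_turnaroundtime([3, 1, 2], 3, [1, 0, 2])  -> [4, 1, 4]
# i.e. an average waiting time of 1.0 and an average turn-around time of 3.0.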
| 17 |
""" GPTBigCode configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPTBigCode model."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
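# Illustrative usage (added for clarity; GPTBigCodeModel is assumed to be the
# matching model class exported by transformers):
#
#   configuration = GPTBigCodeConfig()          # santacoder-style defaults
#   model = GPTBigCodeModel(configuration)      # randomly initialized model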
| 653 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# quiet TensorFlow's C++ logging before any TF import (assumed restoration of
# the assignment target, which was lost in this copy)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
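# Typical invocation (illustrative; the in-repo path of this script is assumed):
#   python utils/print_env.py
# Paste the printed block into a bug report to describe your environment.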
| 709 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
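# Illustrative doctest using the custom flag registered above (the docstring is
# hypothetical, added for clarity):
#
#   >>> random.randint(0, 10)  # doctest: +IGNORE_RESULT
#   7
#
# With +IGNORE_RESULT the checker returns True without comparing the expected
# line against the actual output, so nondeterministic examples still pass.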
| 64 | 0 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects that also supports O(1) value lookup by node name."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        # heapify from the last internal node down to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 108 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Return True only if every custom kernel/Cython file ships with the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
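# Typical invocations (illustrative; the in-repo path of this script is assumed
# to be utils/check_build.py):
#   python utils/check_build.py              # check the built release in build/lib
#   python utils/check_build.py --check_lib  # check the installed package instead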
| 353 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a ResNet model."""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
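# Illustrative usage (added for clarity; ResNetModel is assumed to be the
# matching model class exported by transformers):
#
#   configuration = ResNetConfig()          # resnet-50-style defaults
#   model = ResNetModel(configuration)      # randomly initialized backbone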
| 711 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    # Keep track of visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a back edge (i.e. a cycle) when one is found."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
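# Illustrative usage (added for clarity):
#
#   >>> check_cycle({0: [1], 1: [2], 2: [0]})   # back edge 2 -> 0 closes a cycle
#   True
#   >>> check_cycle({0: [1], 1: [2], 2: []})    # a DAG has no cycle
#   False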
| 565 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BertGeneration model."""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
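# Illustrative usage (added for clarity; BertGenerationEncoder is assumed to be
# a matching model class exported by transformers):
#
#   configuration = BertGenerationConfig()
#   model = BertGenerationEncoder(configuration)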
| 49 |
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
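# Typical invocations (illustrative; assuming this file is saved as nlp_example.py):
#   python nlp_example.py                                    # single CPU/GPU
#   accelerate launch nlp_example.py --mixed_precision fp16  # via the accelerate CLI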
| 487 | 0 |
"""CLI command that converts a TensorFlow Datasets dataset script into a HuggingFace Datasets script."""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a TensorFlow Datasets dataset into a HuggingFace Datasets dataset.

    Returns: ConvertCommand
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the datasets-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 415 | '''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 415 | 1 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 679 |
"""Utilities for converting PyTorch state dicts to Flax parameter trees."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: Flax expects HWIO kernels while PyTorch stores OIHW
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax kernels are the transpose of PyTorch weights
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
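# Quick illustration of rename_key (added for clarity): every "<name>.<digit>"
# segment has its dot replaced by an underscore, so list-indexed PyTorch module
# names become valid Flax dictionary keys, e.g.
#   rename_key("down_blocks.0.resnets.1.conv1.weight")
#   -> "down_blocks_0.resnets_1.conv1.weight"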
| 679 | 1 |
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 720 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
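# Illustrative usage (added for clarity; downloads the sentencepiece model that
# ships with the pretrained checkpoint):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   tokenizer("Paris est la capitale de la France.")["input_ids"]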
| 106 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 182 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with zeros if it is shorter than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story without highlights returns an empty list of summary lines."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty story list and an empty summary list."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
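
# The tests above can be run directly with the standard unittest runner
# (the module filename is not shown in this fragment, so invoke the file itself):
if __name__ == "__main__":
    unittest.main()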
| 182 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: the "images" are mel spectrograms that `Mel` converts to and from audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM can produce good samples in far fewer steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop to recover the starting noise; deterministic, so DDIM only."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two noise tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
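
# A hedged usage sketch of the pipeline above. The checkpoint id
# ("teticio/audio-diffusion-256") is an assumption for illustration only.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
    output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
    mel_image = output.images[0]  # the generated mel spectrogram as a PIL image
    waveform = output.audios[0]   # the corresponding audio as a numpy array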
| 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | 0 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if the decimal digits of `num` read the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
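
# Quick sanity checks in addition to the doctest run above; the expected values
# follow directly from the digit-reversal logic and are illustrative only.
if __name__ == "__main__":
    assert is_palindrome(121)
    assert not is_palindrome(-121)  # negatives are rejected up front
    assert not is_palindrome(10)    # 10 reversed is 1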
| 451 | '''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
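
# A hedged invocation sketch; the script filename "tracking_example.py" is an assumption:
#   accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking --project_dir logs
# With --with_tracking set, every tracker found in the environment (TensorBoard,
# Weights & Biases, ...) receives the per-epoch accuracy, F1 and train loss logged above.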
| 451 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True,
                 classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
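
# A minimal usage sketch; the checkpoint id comes from the archive map above.
if __name__ == "__main__":
    config = LukeConfig.from_pretrained("studio-ousia/luke-base")
    print(config.use_entity_aware_attention)  # LUKE's entity-aware self-attention flag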
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180,
                 depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0,
                 qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
                 hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
                 upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
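
# Same pattern as other configs; the checkpoint id comes from the archive map above.
if __name__ == "__main__":
    config = Swin2SRConfig.from_pretrained("caidas/swin2sr-classicalsr-x2-64")
    print(config.upscale)  # 2, i.e. x2 classical super-resolution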
| 290 | 0 |
def check_bouncy(num: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor entirely non-increasing."""
    if not isinstance(num, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_num = str(num)
    sorted_str_num = "".join(sorted(str_num))
    return sorted_str_num != str_num and sorted_str_num[::-1] != str_num


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(99)}')
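
# For reference: check_bouncy(101) is True (the digits fall then rise), while
# check_bouncy(134468) is False because its digits never decrease. This is
# Project Euler problem 112; 1587000 is the commonly cited answer for the 99%
# threshold printed above, but treat that figure as an assumption to verify.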
| 203 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
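
# A usage sketch of the reader round trip; the checkpoint id comes from the maps above.
if __name__ == "__main__":
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love ?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)  # highest-scoring answer span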
| 203 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a_ = "sshleifer/bart-tiny-random"
a_ = "patrickvonplaten/t5-tiny-random"
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case__ ( self):
return AutoConfig.from_pretrained(lowercase_)
def snake_case__ ( self):
snake_case_ : Dict = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def snake_case__ ( self):
snake_case_ : int = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_)
def snake_case__ ( self):
snake_case_ : str = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def snake_case__ ( self):
snake_case_ : Optional[int] = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def snake_case__ ( self):
with self.assertRaises(lowercase_):
create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=lowercase_ , d=lowercase_)
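
# These tests assume the seq2seq research example's make_student module is importable
# from the working directory; the test filename is not shown in this fragment, so run
# the file itself, e.g.:
#   python -m pytest test_make_student.py   (filename assumed)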
| 703 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
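
# A hedged invocation sketch; the script filename "cross_validation.py" is an assumption:
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16
# Each fold trains a fresh model; the per-fold test logits are summed and argmax'd
# above, i.e. the final test metric is computed on a simple ensemble of the folds.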
| 92 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True ) -> Dict:
if config_path is not None:
lowerCAmelCase__ : List[Any] = UniSpeechConfig.from_pretrained(_lowerCAmelCase )
else:
lowerCAmelCase__ : Tuple = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
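    # Example invocation (all paths are illustrative):
    #   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./unispeech.pt --pytorch_dump_folder_path ./unispeech-hf \
    #       --dict_path ./dict.ltr.txt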
| 453 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Check primality with 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the nth prime number; the 10001st prime is Project Euler problem 7."""
    count = 0
    number = 1
    # step by 1 until we pass 2 and 3, then only test odd candidates
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
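# Example (illustrative): solution(6) == 13, since the primes run 2, 3, 5, 7, 11, 13.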
if __name__ == "__main__":
print(F'''{solution() = }''')
| 454 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: unit number of the flattened layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: weight learning rate
        :param rate_t: threshold learning rate
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
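    # Illustrative construction (layer sizes assumed): a 3x3 kernel, 4 feature maps,
    # stride 1, 2x2 pooling, 100-unit flatten layer, 20-unit hidden layer, 10-unit output:
    #   cnn = CNN([3, 4, 1], 2, 100, 20, 10)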
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read a model saved with save_model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_weight = model_dic.get("rate_weight")
        rate_thre = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_weight, rate_thre)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        """Sigmoid activation used by the convolution and BP layers."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a value to three decimal places."""
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and save as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expand each data slice to one dimension
        focus_list = []
        for each_focus in data_focus:
            focus_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expand three-dimensional data into a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # expand a matrix into a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # distribute each pooled gradient back over its pooling window
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # accumulate the summed error of every single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
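    # Illustrative training call (data shapes assumed):
    #   cnn.train(patterns=len(train_images), datas_train=train_images,
    #             datas_teach=train_labels, n_repeat=100, error_accuracy=0.1, draw_e=True)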
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the image data after the convolution stage so it can be inspected
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass | 436 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 436 | 1 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
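# These helpers are normally driven by `Accelerator.save_state(...)` / `Accelerator.load_state(...)`
# rather than called directly; a direct call looks roughly like this (illustrative):
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt_dir")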
| 510 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # holds the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Number of possible binary search trees for `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Number of possible binary trees for `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)
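# Worked example: node_count = 3 gives catalan_number(3) = 5 search trees
# and 5 * 3! = 30 binary trees.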
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 510 | 1 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Gamma of positive integers and half-integers, via gamma(n) = (n - 1) * gamma(n - 1)."""
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
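# Worked example: gamma(5) == 24.0 (that is, 4!), and gamma(2.5) == 1.5 * 0.5 * sqrt(pi).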
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
while num:
        num = float(input('Gamma of: '))
print(f"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 705 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl: list, wt: list, w: int, n: int) -> float:
    """Greedy fractional knapsack: vl holds item values, wt item weights,
    w the capacity and n the item count."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
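# Worked example: frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0 -- items are
# taken in value/weight ratio order (6, 5, 4): the first two fully, then 20/30 of the third.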
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 | 0 |
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt") -> int:
    """Minimal path sum from the top left to the bottom right of the grid, moving only
    right and down (Project Euler problem 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(',')] for row in data.strip().splitlines()]
    n = len(grid[0])
    # dp[i][j] holds the minimal path sum reaching cell (i, j)
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }') | 329 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def rename_key(key):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key
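# Worked example: rename_key("down_blocks.0.attentions.1.weight") -> "down_blocks_0.attentions_1.weight"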
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 329 | 1 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info so a run can be traced back to the exact code state."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
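# Illustrative: git_log("serialization_dir") writes repo_id / repo_sha / repo_branch
# to serialization_dir/git_log.json.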
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setups from environment variables."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args):
    """Set numpy and torch seeds for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 610 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longt5"""] = [
        """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongT5EncoderModel""",
        """LongT5ForConditionalGeneration""",
        """LongT5Model""",
        """LongT5PreTrainedModel""",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
        """FlaxLongT5ForConditionalGeneration""",
        """FlaxLongT5Model""",
        """FlaxLongT5PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 610 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor with resize, center-crop, rescale and BGR channel-flip steps
    (the MobileViT-style class name is inferred from the preprocessing defaults).
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(self, image, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='''bilinear''', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
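# Illustrative usage (checkpoint name assumed):
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   inputs = image_processor(images=image, return_tensors="pt")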
| 658 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, '''realm_tokenizer''')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, '''realm_block_records''')
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''realm_tokenizer'''))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype='''long''')
        question_input_ids = tokenizer(['''Test question''']).input_ids
        answer_ids = tokenizer(
            ['''the fourth'''], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='''np''')

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype='''long''')
        question_input_ids = tokenizer(['''Test question''']).input_ids
        answer_ids = tokenizer(
            ['''the fourth''', '''longer longer'''],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='''np''')

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, '''realm_block_records'''))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, '''realm_block_records'''))
        self.assertEqual(retriever.block_records[0], b'''This is the first record''')

        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, '''realm_block_records'''), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''')

        self.assertEqual(retriever.block_records[0], b'''This is the first record''')
| 658 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord("""\t"""),
'''newline''': ord("""\r"""),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 709 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: predicts the class of an audio clip from a
    user-provided set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
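# Illustrative usage (model name assumed):
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])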
| 414 | 0 |
'''simple docstring'''
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        # threshold the logits into a binary mask and render it as an image
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 329 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config) -> BertModel:
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # NOTE: unreachable — the same condition is already handled above
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path) -> None:
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
_lowercase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
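# Hypothetical CLI invocation (the script name and paths are placeholders, not real files):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/ckpt-100 \
#       --bert_config_file ./tf2_ckpt/config.json \
#       --pytorch_dump_path ./pytorch_model.bin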
| 118 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
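# Illustrative usage (as comments, since this module lives inside the transformers
# package and uses relative imports; names match the class defined above, and the
# random frames stand in for a real video clip):
#
#   video = [np.random.randint(0, 256, size=(360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   processor = VivitImageProcessor()
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # -> (1, 8, 3, 224, 224)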
| 116 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 116 | 1 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Fallback key used when a method is called with key 0 / None."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 98 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
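# To run only the fast tests above (assuming pytest is installed and this file sits
# at its usual location inside the diffusers test suite — the path is an assumption):
#
#   python -m pytest -k "FastTests" tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py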
| 503 | 0 |
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by upper-casing some of its
    lowercase letters and deleting the remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
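    # Illustrative checks of abbr() from the classic "abbreviation" task:
    assert abbr("daBcd", "ABC") is True  # capitalize 'a' and 'c', delete both 'd's
    assert abbr("dBcd", "ABC") is False  # no letter can become the leading 'A'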
| 710 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
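# A note on the change of variables above (pure trigonometry, no model involved):
# for t in [0, 1], sigma = sin(pi*t/2)^2 and alpha = sqrt(1 - sigma^2), and
# alpha_sigma_to_t recovers a timestep as 2*atan2(sigma, alpha)/pi. For example,
# t = 1 gives sigma = 1 and alpha = 0, and atan2(1, 0) = pi/2 maps back to t = 1.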
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
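# Hypothetical CLI invocation (the script name is illustrative; the model name must
# be one of the MODELS_MAP keys so the checkpoint can be fetched automatically):
#
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers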
| 433 | 0 |
import sys
import turtle
def get_mid(p1, p2):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2  # midpoint of two points


def triangle(
    vertex1,
    vertex2,
    vertex3,
    depth,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
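    # Each recursion level triples the number of triangles drawn, so a depth of d
    # draws on the order of 3**d small triangles — depths much above ~7 get slow.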
| 393 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 11 | 0 |
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Wraps an existing text-model config and adds a `modal_hidden_size`
    (and optionally `num_labels`) for multimodal use."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
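# Hypothetical usage sketch (as comments; this module uses relative imports):
#
#   from transformers import BertConfig
#
#   cfg = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)
#   print(cfg.hidden_size, cfg.modal_hidden_size, cfg.num_labels)  # -> 768 2048 2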
| 716 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 613 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Story files in which the summary sentences follow `@highlight` markers."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story into its article lines and its `@highlight` summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate the sequence to `block_size`, or pad it with `pad_token_id`."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0s and 1s per sentence, switching at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
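# Illustrative self-check of the helpers above on a tiny in-memory story:
if __name__ == "__main__":
    story = "First sentence\nSecond sentence\n@highlight\nA summary line"
    story_lines, summary_lines = process_story(story)
    print(story_lines)    # ['First sentence.', 'Second sentence.']
    print(summary_lines)  # ['A summary line.']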
| 276 |
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort"): only valid for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        # let each "bead" fall one rod at a time: move the surplus between neighbours
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 276 | 1 |
"""simple docstring"""
def solution() -> str:
    """Project Euler 48: the last ten digits of 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
| 708 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase : int = logging.get_logger(__name__)
__lowercase : Tuple = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowercase : List[Any] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowercase : List[str] = {"facebook/blenderbot-3B": 128}
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ : str = BlenderbotTokenizer
def __init__( self : Union[str, Any] , A_ : Any=None , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Union[str, Any]="replace" , A_ : Union[str, Any]="<s>" , A_ : Union[str, Any]="</s>" , A_ : Optional[int]="</s>" , A_ : List[Any]="<s>" , A_ : Union[str, Any]="<unk>" , A_ : Any="<pad>" , A_ : List[str]="<mask>" , A_ : int=False , A_ : Tuple=True , **A_ : Optional[Any] , ) -> int:
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
__snake_case = getattr(A_ , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**A_ )
__snake_case = add_prefix_space
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , A_ ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(A_ , state.pop('''type''' ) )
__snake_case = component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self : int , A_ : Union[str, Any] ) -> List[Any]:
__snake_case = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
__snake_case = value
def lowercase ( self : Any , *A_ : List[str] , **A_ : Optional[int] ) -> BatchEncoding:
__snake_case = kwargs.get('''is_split_into_words''' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A_ , **A_ )
def lowercase ( self : str , *A_ : Tuple , **A_ : str ) -> BatchEncoding:
__snake_case = kwargs.get('''is_split_into_words''' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A_ , **A_ )
def lowercase ( self : str , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
__snake_case = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def lowercase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def lowercase ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]:
__snake_case = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
__snake_case = ''' '''.join(A_ )
__snake_case = self.encode(A_ )
if len(A_ ) > self.model_max_length:
__snake_case = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 93 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
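
if __name__ == "__main__":
    # A quick sketch of the two utilities exercised above; `ContextManagers` enters
    # the given context managers in order and exits them in reverse, and `find_labels`
    # reads label argument names off a model class's forward signature.
    with ContextManagers([context_en(), context_fr()]):
        print("hello")  # prints: Welcome!, Bonjour!, hello, Au revoir!, Bye!
    if is_torch_available():
        print(find_labels(BertForSequenceClassification))  # ['labels']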
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve an ODE y' = f(x, y) with Heun's (modified Euler) predictor-corrector scheme."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor step: plain explicit Euler
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector step: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
    doctest.testmod()
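
    # Example (illustrative): integrate y' = y from x = 0 to 1 with the routine
    # above; the exact solution is e^x, so the final value should be close to
    # e ~= 2.71828.
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(y[-1])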
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and test time speedup: pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")

    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )

    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")

    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),  # int64 so torch.from_numpy yields a LongTensor of token ids
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    main()
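
# A hypothetical invocation of this script (the script filename and data path are
# placeholders, not from the source; the flags are defined by the parser above):
#
#   python prune_gpt2_heads.py --model_name_or_path gpt2 \
#       --data_dir tokenized_ids.txt --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9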
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (given as a string in `variable`) by Newton-Raphson."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 105 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
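
if __name__ == "__main__":
    # Usage sketch, assuming the classes above mirror transformers' ResNetConfig /
    # ResNetOnnxConfig; the argument values are illustrative.
    config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic")
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']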
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2

        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")

        assert np.abs(expected_image - image).max() < 9e-2
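
if __name__ == "__main__":
    # A condensed version of the slow test above, assuming diffusers'
    # StableDiffusionControlNetImg2ImgPipeline API; checkpoints and image URLs are
    # taken from the test itself.
    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
    )
    pipe.enable_model_cpu_offload()
    control_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
    ).resize((512, 512))
    init_image = load_image(
        "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
    ).resize((512, 512))
    result = pipe("evil space-punk bird", init_image, control_image=control_image, num_inference_steps=50, strength=0.6)
    result.images[0].save("bird_punk.png")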
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
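
if __name__ == "__main__":
    # Quick shape check for the helper above: 4 timesteps embedded into 32 dims
    # give a (4, 32) array of sin terms followed by cos terms (swapped when
    # flip_sin_to_cos=True).
    emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
    assert emb.shape == (4, 32)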
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=3_0_5_2_2, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
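
    # Downstream sketch: the distillation trainer typically smooths these counts
    # into MLM masking probabilities; the 0.7 exponent below is an assumption
    # matching the example's usual default, not taken from this file.
    import numpy as np

    smoothed = np.maximum(np.array(counts), 1) ** -0.7  # rarer tokens get masked more often
    logger.info("Computed smoothed masking weights for %d tokens", len(smoothed))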
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
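
if __name__ == "__main__":
    # Typical entry point for the pipeline class above via transformers' pipeline
    # factory; the checkpoint name is illustrative.
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))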
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def __lowercase ( a__ = "/p089_roman.txt" ) -> int:
__SCREAMING_SNAKE_CASE = 0
with open(os.path.dirname(a__ ) + roman_numerals_filename ) as filea:
__SCREAMING_SNAKE_CASE = filea.readlines()
for line in lines:
__SCREAMING_SNAKE_CASE = line.strip()
__SCREAMING_SNAKE_CASE = parse_roman_numerals(a__ )
__SCREAMING_SNAKE_CASE = generate_roman_numerals(a__ )
savings += len(a__ ) - len(a__ )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 148 | 0 |
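
    # Round-trip sanity check for the two helpers above: four-symbol runs collapse
    # to subtractive forms.
    assert parse_roman_numerals("XXXXVIIII") == 49
    assert generate_roman_numerals(49) == "XLIX"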
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
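
# Hypothetical command line for the converter above (paths and the script name are
# placeholders; the flags are defined by the parser):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin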
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
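# Illustrative usage (a sketch, not part of the original row): the public entry
# point for this stream class is `Dataset.from_generator`, which wraps
# GeneratorDatasetInputStream internally. Assumes `datasets` is installed:
#
#   from datasets import Dataset
#
#   def gen():
#       for i in range(3):
#           yield {"id": i, "text": f"example {i}"}
#
#   ds = Dataset.from_generator(gen)
#   print(ds[0])  # {'id': 0, 'text': 'example 0'}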
| 543 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an M-CTC-T model."""

    model_type = "mctct"

    def __init__(
        self, vocab_size=8_065, hidden_size=1_536, num_hidden_layers=36, intermediate_size=6_144,
        num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5,
        layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,),
        input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum",
        ctc_zero_infinity=False, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.' )
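# Illustrative usage (a sketch, not part of the original row): the validation
# above raises when conv_kernel and num_conv_layers disagree:
#
#   from transformers import MCTCTConfig
#   cfg = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))
#   cfg.hidden_size  # 1536 by default
#   MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # raises ValueError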
| 543 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    # NOTE: the two skipped test names below are placeholders; the original
    # method names are not recoverable from this dump.
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_skipped_fast_tokenizer_inconsistency_1(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_skipped_fast_tokenizer_inconsistency_2(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4_398, 25, 21, 1_289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a__ : Union[str, Any] = {"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=a__,  # the payload dict defined just above
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
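# Illustrative note (a sketch, not part of the original row): the suite above is
# run with pytest, e.g.
#
#   python -m pytest tests/models/deberta_v2/test_tokenization_deberta_v2.py -k "full_tokenizer"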
| 151 |
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is a pentagonal number, i.e. n = m(3m - 1)/2 for some integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Project Euler 44: find a pair of pentagonal numbers whose sum and
    difference are both pentagonal, and return their difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
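# Illustrative check (a sketch, not part of the original row):
#
#   >>> is_pentagonal(22)   # P_4 = 22
#   True
#   >>> is_pentagonal(23)
#   False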
| 151 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526

primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
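
    # Illustrative usage (a sketch, not part of the original row): a complete
    # exchange between two parties; both sides derive the same SHA-256 digest.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)

    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared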
| 548 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
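# Illustrative note (a sketch, not part of the original row): with the toy
# merges above, "react" decomposes greedily into re@@ a@@ c@@ t, i.e. ids
# [1, 2, 4, 5], while an unseen token falls back to <unk> (id 6).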
| 366 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits', self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, 'dwtDct') for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
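# Illustrative usage (a sketch, not part of the original row; requires the
# `invisible-watermark` package that provides `imwatermark`):
#
#   watermarker = StableDiffusionXLWatermarker()
#   batch = torch.zeros(1, 3, 256, 256)  # stand-in for decoded images in [-1, 1]
#   marked = watermarker.apply_watermark(batch)
#   marked.shape  # torch.Size([1, 3, 256, 256])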
| 703 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token'))
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings'))
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'))
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'))
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'))
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'''))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights to our ViT hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding='same', layer_type='bottleneck', depths=(3, 4, 9), out_features=['stage3'], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print('Predicted class:', logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and processor to the hub {vit_name}')
        model.push_to_hub(f'ybelkada/{vit_name}')
        processor.push_to_hub(f'ybelkada/{vit_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
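# Illustrative usage (a sketch, not part of the original row):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid_dump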
| 432 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float('inf')]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float('inf'), dtype=tf.float32))), dtype=tf.int32, )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1E-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            'AutoModelForCausalLM': TFAutoModelForCausalLM,
            'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
            'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
            'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
            'LogitsProcessorList': TFLogitsProcessorList,
            'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
            'create_tensor_fn': tf.convert_to_tensor,
            'floats_tensor': floats_tensor,
            'return_tensors': 'tf',
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name='input_ids'),
                    tf.TensorSpec((None, input_length), tf.int32, name='attention_mask'),
                ), jit_compile=True, )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={'serving_default': dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures['serving_default']
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size]),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)['sequences']
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name='input_ids'),
                    tf.TensorSpec((batch_size, None), tf.int32, name='attention_mask'),
                ), jit_compile=True, )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={'serving_default': dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures['serving_default']
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]]),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)['sequences']
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_saved_model_with_tf_tokenizer(self):  # descriptive name; the original is not recoverable from this dump
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='google/flan-t5-small', filename='spiece.model', local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, 'spiece.model'), 'rb').read())
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id)
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='inputs')
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            'do_sample': True,
            'num_beams': 1,
            'top_p': 0.7,
            'top_k': 10,
            'temperature': 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        text = 'Hello, my dog is cute and'
        tokens = tokenizer(text, return_tensors='tf')
        model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(':/CPU:0'):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(':/CPU:0'):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
        article = 'Hugging Face is a technology company based in New York and Paris.'
        input_ids = bart_tokenizer(article, return_tensors='tf').input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose `call` accepts an extra "foo" argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
        fake_output = bart_model.generate(input_ids, foo='bar').numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo='bar')
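# Illustrative note (a sketch, not part of the original row): the SavedModel
# produced by the export tests above can be served without transformers:
#
#   serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
#   out = serving_func(input_ids=tf.constant([[2, 0]]), attention_mask=tf.constant([[1, 0]]))
#   print(out["sequences"])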
| 695 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Optional[Any] , )-> Optional[int]:
__lowerCAmelCase =FlaubertForQuestionAnswering(snake_case_)
model.to(snake_case_)
model.eval()
__lowerCAmelCase =model(snake_case_)
__lowerCAmelCase =model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , )
__lowerCAmelCase =model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , )
((__lowerCAmelCase) , ) =result_with_labels.to_tuple()
__lowerCAmelCase =model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_)
((__lowerCAmelCase) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def UpperCamelCase ( self : List[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : int , snake_case_ : Tuple , snake_case_ : int , snake_case_ : str , snake_case_ : Dict , )-> Tuple:
__lowerCAmelCase =FlaubertForSequenceClassification(snake_case_)
model.to(snake_case_)
model.eval()
__lowerCAmelCase =model(snake_case_)
__lowerCAmelCase =model(snake_case_ , labels=snake_case_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Tuple , )-> Any:
__lowerCAmelCase =self.num_labels
__lowerCAmelCase =FlaubertForTokenClassification(snake_case_)
model.to(snake_case_)
model.eval()
__lowerCAmelCase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , )-> Optional[int]:
__lowerCAmelCase =self.num_choices
__lowerCAmelCase =FlaubertForMultipleChoice(config=snake_case_)
model.to(snake_case_)
model.eval()
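        # Multiple-choice inputs are rank-3: unsqueeze(1) adds a num_choices axis, and
        # expand(...).contiguous() materializes the (batch, num_choices, seq_len) view,
        # so the same sequence is scored once per choice.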
__lowerCAmelCase =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase =model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase ( self : int)-> Dict:
__lowerCAmelCase =self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) =config_and_inputs
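        # Flaubert follows the XLM interface: besides ids and masks, the common
        # inputs carry per-example sentence `lengths`.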
__lowerCAmelCase ={
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self : List[str] , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Dict)-> str:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""")
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self : int , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : int=False)-> Optional[Any]:
__lowerCAmelCase =super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__lowerCAmelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_)
__lowerCAmelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_)
return inputs_dict
def UpperCamelCase ( self : Dict)-> Optional[Any]:
__lowerCAmelCase =FlaubertModelTester(self)
__lowerCAmelCase =ConfigTester(self , config_class=snake_case_ , emb_dim=37)
def UpperCamelCase ( self : int)-> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any)-> Tuple:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case_)
def UpperCamelCase ( self : Union[str, Any])-> List[str]:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case_)
def UpperCamelCase ( self : Any)-> Any:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case_)
def UpperCamelCase ( self : Optional[int])-> List[str]:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case_)
def UpperCamelCase ( self : List[Any])-> Optional[Any]:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case_)
def UpperCamelCase ( self : Tuple)-> Union[str, Any]:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case_)
def UpperCamelCase ( self : List[Any])-> Any:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case_)
@slow
def UpperCamelCase ( self : List[str])-> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase =FlaubertModel.from_pretrained(snake_case_)
self.assertIsNotNone(snake_case_)
@slow
@require_torch_gpu
def UpperCamelCase ( self : Dict)-> str:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__lowerCAmelCase =True
__lowerCAmelCase =model_class(config=snake_case_)
__lowerCAmelCase =self._prepare_for_class(snake_case_ , snake_case_)
__lowerCAmelCase =torch.jit.trace(
snake_case_ , (inputs_dict["""input_ids"""].to("""cpu"""), inputs_dict["""attention_mask"""].to("""cpu""")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case_ , os.path.join(snake_case_ , """traced_model.pt"""))
__lowerCAmelCase =torch.jit.load(os.path.join(snake_case_ , """traced_model.pt""") , map_location=snake_case_)
loaded(inputs_dict["""input_ids"""].to(snake_case_) , inputs_dict["""attention_mask"""].to(snake_case_))
@require_torch
class __a ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : Tuple)-> str:
__lowerCAmelCase =FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""")
__lowerCAmelCase =torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
__lowerCAmelCase =model(snake_case_)[0]
__lowerCAmelCase =torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , snake_case_)
__lowerCAmelCase =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4))
| 354 | 0 |
"""simple docstring"""
def find_minimum_change(denominations, value):
    """Greedy change-making: always take the largest denomination that still fits.

    Optimal for canonical coin systems such as Indian currency; not guaranteed
    optimal for arbitrary denomination sets.
    """
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take coins of this denomination while they still fit
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Record the coin used
    return answer
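

# Example (hypothetical call): find_minimum_change([1, 2, 5, 10], "27")
# greedily takes 10, 10, 5, 2 and returns [10, 10, 5, 2].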
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for coin in answer:
            print(coin, end=" ")
| 704 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the (possibly nested) attribute of `hf_pointer` named by `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""")
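

# Illustration (hypothetical key): set_recursively(model, "encoder.layers.0.conv", t, name, "weight")
# resolves model.encoder.layers.0.conv via chained getattr (ModuleList children are
# reachable by their string index) and writes `t` into its .weight.data after the shape check.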
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) ignore patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
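

# Hypothetical example: should_ignore("encoder.model.0.conv.bias", ["encoder.model.0.*"])
# returns True, since the pattern ends in ".*" and its prefix matches the weight name.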
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every original EnCodec weight onto the Transformers model via the MAPPING tables."""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"""{name} was ignored""")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Build the HF config/model for `model_name`, load the original weights, and save/push."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
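

# Example invocation (file names are placeholders; checkpoints come from the URLs above):
#   python convert_encodec_checkpoint.py --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec-24khz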
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 553 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check that mT5-small reproduces the score of the original (Mesh-TF) checkpoint."""
        model = AutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")

        input_ids = tokenizer("""Hello there""", return_tensors="""pt""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""pt""").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # `loss` is the mean cross-entropy per label token; scaling by the label length
        # (and negating) recovers a total log-likelihood comparable across frameworks.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 102 | '''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale mel-spectrogram features into `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Exact inverse of `scale_features`: map network outputs back to mel amplitudes."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}."""
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('Generated segment', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.'
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.'
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 396 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _UpperCAmelCase ):
UpperCAmelCase__ = (IPNDMScheduler,)
UpperCAmelCase__ = (('''num_inference_steps''', 5_0),)
def _snake_case ( self : Dict , **__lowerCamelCase : Dict ) -> Tuple:
__magic_name__ = {"""num_train_timesteps""": 1_0_0_0}
config.update(**lowercase_ )
return config
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Any ) -> List[str]:
__magic_name__ = dict(self.forward_default_kwargs )
__magic_name__ = kwargs.pop("num_inference_steps" , lowercase_ )
__magic_name__ = self.dummy_sample
__magic_name__ = 0.1 * sample
__magic_name__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__magic_name__ = self.get_scheduler_config(**lowercase_ )
__magic_name__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
__magic_name__ = dummy_past_residuals[:]
if time_step is None:
__magic_name__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
__magic_name__ = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
__magic_name__ = dummy_past_residuals[:]
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
__magic_name__ = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
__magic_name__ = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self : int ) -> List[str]:
pass
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : int=0 , **__lowerCamelCase : Any ) -> Union[str, Any]:
__magic_name__ = dict(self.forward_default_kwargs )
__magic_name__ = kwargs.pop("num_inference_steps" , lowercase_ )
__magic_name__ = self.dummy_sample
__magic_name__ = 0.1 * sample
__magic_name__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
__magic_name__ = dummy_past_residuals[:]
if time_step is None:
__magic_name__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
__magic_name__ = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
__magic_name__ = dummy_past_residuals[:]
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
__magic_name__ = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
__magic_name__ = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self : Union[str, Any] , **__lowerCamelCase : List[Any] ) -> List[Any]:
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config(**lowercase_ )
__magic_name__ = scheduler_class(**lowercase_ )
__magic_name__ = 1_0
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = model(lowercase_ , lowercase_ )
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = model(lowercase_ , lowercase_ )
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def _snake_case ( self : List[str] ) -> Optional[Any]:
__magic_name__ = dict(self.forward_default_kwargs )
__magic_name__ = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**lowercase_ )
__magic_name__ = self.dummy_sample
__magic_name__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
__magic_name__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__magic_name__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__magic_name__ = dummy_past_residuals[:]
__magic_name__ = scheduler.timesteps[5]
__magic_name__ = scheduler.timesteps[6]
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
__magic_name__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self : Any ) -> List[str]:
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ , time_step=lowercase_ )
def _snake_case ( self : Union[str, Any] ) -> List[Any]:
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowercase_ , time_step=lowercase_ )
def _snake_case ( self : Tuple ) -> int:
__magic_name__ = self.full_loop()
__magic_name__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
| 708 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
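# 250004 and 250020 are the fairseq language-code ids for en_XX and ro_RO: the
# mBART-50 tokenizer prepends the source language code and appends </s>, which the
# prefix/suffix assertions below rely on.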
@require_sentencepiece
@require_tokenizers
class A_ ( snake_case_ , unittest.TestCase ):
    UpperCAmelCase__ = MBart50Tokenizer
    UpperCAmelCase__ = MBart50TokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def _snake_case ( self : Any ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
        __magic_name__ = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Dict ) -> List[Any]:
__magic_name__ = "<s>"
__magic_name__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _snake_case ( self : int ) -> Union[str, Any]:
__magic_name__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 1_0_5_4 )
def _snake_case ( self : Any ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def _snake_case ( self : str ) -> Optional[Any]:
        __magic_name__ = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
__magic_name__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__magic_name__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
__magic_name__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__magic_name__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _snake_case ( self : List[str] ) -> List[str]:
# fmt: off
__magic_name__ = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def _snake_case ( self : int ) -> Any:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__magic_name__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__magic_name__ = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = tokenizer_r.save_pretrained(__lowerCamelCase )
__magic_name__ = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__magic_name__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
__magic_name__ = tokenizer_r.from_pretrained(__lowerCamelCase )
__magic_name__ = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
__magic_name__ = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
__magic_name__ = tokenizer_r.from_pretrained(__lowerCamelCase )
__magic_name__ = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
__magic_name__ = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__magic_name__ = tokenizer_r.from_pretrained(__lowerCamelCase )
__magic_name__ = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
UpperCAmelCase__ = '''facebook/mbart-large-50-one-to-many-mmt'''
UpperCAmelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
UpperCAmelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
UpperCAmelCase__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def _snake_case ( cls : Any ) -> Dict:
        __magic_name__ = MBart50Tokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
__magic_name__ = 1
return cls
def _snake_case ( self : Any ) -> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 2_5_0_0_3_8 )
def _snake_case ( self : Any ) -> str:
__magic_name__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def _snake_case ( self : Any ) -> List[str]:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
__magic_name__ = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
__magic_name__ = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
__magic_name__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def _snake_case ( self : int ) -> List[str]:
__magic_name__ = ["this is gunna be a long sentence " * 2_0]
assert isinstance(src_text[0] , __lowerCamelCase )
__magic_name__ = 1_0
__magic_name__ = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __lowerCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def _snake_case ( self : str ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def _snake_case ( self : Any ) -> str:
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
        __magic_name__ = MBart50Tokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
@require_torch
def _snake_case ( self : int ) -> Dict:
__magic_name__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" )
__magic_name__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _snake_case ( self : Dict ) -> Tuple:
__magic_name__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__magic_name__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
__magic_name__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _snake_case ( self : Optional[int] ) -> Dict:
__magic_name__ = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" )
__magic_name__ = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0 , return_tensors="pt" )
__magic_name__ = targets["input_ids"]
__magic_name__ = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _snake_case ( self : Optional[int] ) -> Tuple:
__magic_name__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 2_5_0_0_0_1,
} , )
| 468 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
"""simple docstring"""
def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=1_6 , _A=3_6 , _A=6 , _A=6 , _A=6 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCamelCase : Optional[int] = parent
UpperCamelCase : List[str] = batch_size
UpperCamelCase : List[str] = seq_length
UpperCamelCase : str = is_training
UpperCamelCase : Optional[Any] = use_input_mask
UpperCamelCase : Tuple = use_token_type_ids
UpperCamelCase : List[str] = use_labels
UpperCamelCase : List[Any] = vocab_size
UpperCamelCase : Optional[int] = embedding_size
UpperCamelCase : Dict = hidden_size
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : Tuple = num_hidden_groups
UpperCamelCase : str = num_attention_heads
UpperCamelCase : str = intermediate_size
UpperCamelCase : int = hidden_act
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : int = attention_probs_dropout_prob
UpperCamelCase : Union[str, Any] = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : Tuple = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Any = num_labels
UpperCamelCase : Dict = num_choices
UpperCamelCase : List[str] = scope
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Any = None
if self.use_token_type_ids:
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Tuple = None
UpperCamelCase : Tuple = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ):
'''simple docstring'''
return AlbertConfig(
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference_no_head_absolute_embedding( self ):
        '''simple docstring'''
        model = AlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
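

# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original test file): the
# shape check that create_and_check_model automates, written out by hand with
# the tiny tester defaults from above. Values are for illustration only.
# ---------------------------------------------------------------------------
def _albert_shape_check_example():
    config = AlbertConfig(
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
    )
    model = AlbertModel(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (13, 7))  # (batch_size, seq_length)
    with torch.no_grad():
        outputs = model(input_ids)
    assert outputs.last_hidden_state.shape == (13, 7, 36)  # (batch, seq, hidden_size)
    assert outputs.pooler_output.shape == (13, 36)         # (batch, hidden_size)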
| 102 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowercase__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good speed/memory trade-off
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        '''simple docstring'''
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6_0_0_0 , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        '''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="""pt""" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=4_8_0_0_0_0 )

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]

        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens : List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )

            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )

        latents = 1 / 0.1_82_15 * latents
        image = self.vae.decode(latents ).sample

        image = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
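

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original pipeline
# file). It assumes `pipe` is a fully assembled instance of the pipeline above
# (component loading is out of scope here) and `audio` is a 1-D waveform
# sampled at 16 kHz; the output path is arbitrary.
# ---------------------------------------------------------------------------
def _speech_to_image_example(pipe, audio):
    output = pipe(audio, sampling_rate=16_000, num_inference_steps=50, guidance_scale=7.5)
    image = output.images[0]
    image.save("speech_to_image.png")
    return image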
| 102 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )

    return decoded
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt" ) -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='''utf-8''' )

    ciphertext = [int(number ) for number in data.strip().split(''',''' )]

    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 107 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser ):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary(terminalreporter ):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 107 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
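

# ---------------------------------------------------------------------------
# Usage sketch (an addition): the recursive search returns one longest
# non-decreasing subsequence of the input list.
# ---------------------------------------------------------------------------
def _longest_subsequence_example():
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # e.g. [10, 22, 33, 41, 60, 80]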
| 488 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase : Any = logging.getLogger(__name__)
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=str , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=str , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=str , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=int , default=1000 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=str , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=None , type=int , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=str , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer ):
    '''simple docstring'''

    def fn(examples ):
        return tokenizer(examples["""text"""] )

    return fn
def get_serialized_examples(tokenized_data ):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args ):
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )

    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'Limiting the dataset to {args.limit} entries.' )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["""text"""] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )

    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir , f'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )

        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("""Wrote file {} containing {} records""".format(filename , records_containing ) )

        shard_count += 1
        total_records += records_containing

    with open(f'split-{args.split}-records-count.txt' , """w""" ) as f:
        print(f'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
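

# ---------------------------------------------------------------------------
# Illustrative read-back sketch (an addition): parse one generated shard with
# tf.data. The shard path below is a placeholder for whatever the script
# actually wrote.
# ---------------------------------------------------------------------------
def read_back_one_shard(tfrecord_path="tf-tpu/train/dataset-0-1000.tfrecord"):
    feature_description = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }

    def parse_example(serialized):
        parsed = tf.io.parse_single_example(serialized, feature_description)
        # the features were written as variable-length int64 lists; densify them
        return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

    dataset = tf.data.TFRecordDataset(tfrecord_path).map(parse_example)
    for record in dataset.take(1):
        print(record["input_ids"].shape)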
| 457 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_lowerCamelCase : int = logging.get_logger(__name__)
class MarkupLMFeatureExtractor( FeatureExtractionMixin ):
'''simple docstring'''
    def __init__( self , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''bs4'''] )
        super().__init__(**kwargs )
    def xpath_soup( self , element ):
        '''simple docstring'''
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self , html_string ):
        '''simple docstring'''
        html_code = BeautifulSoup(html_string , '''html.parser''' )

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag )

                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )

        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subs ):
        '''simple docstring'''
        xpath = ''''''
        for tagname, subs in zip(xpath_tags , xpath_subs ):
            xpath += f'''/{tagname}'''
            if subs != 0:
                xpath += f'''[{subs}]'''
        return xpath
    def __call__( self , html_strings ):
        '''simple docstring'''
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                f'''but is of type {type(html_strings )}.''' )

        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings , string2xtag_seq , string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )

        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )

        return encoded_inputs
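

# ---------------------------------------------------------------------------
# Usage sketch (an addition): extract text nodes and their xpaths from a small
# HTML document. Requires the beautifulsoup4 backend checked for above; the
# HTML string is a made-up example.
# ---------------------------------------------------------------------------
def _html_extraction_example():
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><div><p>Hello</p><p>World</p></div></body></html>")
    # encoding["nodes"]  -> [['Hello', 'World']]
    # encoding["xpaths"] -> [['/html/body/div/p[1]', '/html/body/div/p[2]']]
    return encoding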
| 721 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class EncodecConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """encodec"""
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4_0_0_0 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=1_2_8 , num_filters=3_2 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1_0_2_4 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs )
@property
    def chunk_length( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate( self ):
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers( self ):
'''simple docstring'''
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
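

# ---------------------------------------------------------------------------
# Sketch of the derived properties (an addition): all three values follow
# directly from sampling_rate, chunk_length_s, overlap and the upsampling
# ratios passed in; the numbers here are illustrative.
# ---------------------------------------------------------------------------
def _encodec_config_example():
    config = EncodecConfig(sampling_rate=24_000, upsampling_ratios=[8, 5, 4, 2], chunk_length_s=1.0, overlap=0.01)
    assert config.frame_rate == 75        # ceil(24_000 / (8 * 5 * 4 * 2))
    assert config.chunk_length == 24_000  # int(1.0 * sampling_rate)
    assert config.chunk_stride == 23_760  # max(1, int((1 - 0.01) * chunk_length))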
| 516 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""

    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**kwargs )
        return config
    def test_timesteps( self ) -> List[str]:
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_variance_type( self ) -> Union[str, Any]:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ) -> Optional[int]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_clip_sample_range( self ) -> List[str]:
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=clip_sample_range )

    def test_prediction_type( self ) -> Union[str, Any]:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ) -> Optional[Any]:
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ) -> Tuple:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        scheduler = scheduler_class(**scheduler_config )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1e-5
    def test_variance_learned_range( self ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''' )
        scheduler = scheduler_class(**scheduler_config )

        predicted_variance = 0.5

        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1e-5
        assert scheduler._get_variance(4_8_7 , predicted_variance=predicted_variance ) - -5.799_8052 < 1e-5
        assert scheduler._get_variance(9_9_9 , predicted_variance=predicted_variance ) - -0.001_0011 < 1e-5
    def test_full_loop( self ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(2_5 )

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
    def test_trained_betas( self ) -> List[Any]:
        pass

    def test_add_noise_device( self ) -> List[Any]:
        pass
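

# ---------------------------------------------------------------------------
# Direct sketch of the quantity asserted in test_variance_fixed_small_log (an
# addition): instantiate the scheduler by hand and query the variance. Note
# that _get_variance is a private helper; the test above relies on it too.
# ---------------------------------------------------------------------------
def _fixed_small_log_variance_example():
    scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
    return scheduler._get_variance(4_8_7)  # approximately 0.0549625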
| 14 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
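

# ---------------------------------------------------------------------------
# Behavior sketch (an addition): with the lazy structure above, importing
# `transformers` stays cheap, and the DeBERTa submodules are imported only on
# first attribute access. Assumes transformers is installed.
# ---------------------------------------------------------------------------
def _lazy_access_example():
    import transformers

    config = transformers.DebertaConfig()  # first access triggers the real import
    return config.model_type  # 'deberta'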
| 574 | 0 |
"""simple docstring"""
def solution(length = 5_0 ):
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1 ):
        for block_length in range(3, row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
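

# ---------------------------------------------------------------------------
# Sanity-check sketch (an addition): a row of length 7 admits exactly 17
# fillings, the worked example given in Project Euler problem 114.
# ---------------------------------------------------------------------------
def _check_worked_example():
    assert solution(7) == 17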
| 406 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 1_0)
def list_field(default=None, metadata=None ):
    return field(default_factory=lambda: default, metadata=metadata )
@dataclass
class BasicExample:
    """simple docstring"""

    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    """simple docstring"""

    foo: int = 42
    baz: str = field(default="toto" , metadata={"help": "help message"} )


@dataclass
class WithDefaultBoolExample:
    """simple docstring"""

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum ):
    """simple docstring"""

    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum ):
    """simple docstring"""

    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    """simple docstring"""

    foo: BasicEnum = "toto"

    def __post_init__(self ):
        """simple docstring"""
        self.foo = BasicEnum(self.foo )


@dataclass
class MixedTypeEnumExample:
    """simple docstring"""

    foo: MixedTypeEnum = "toto"

    def __post_init__(self ):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo )


@dataclass
class OptionalExample:
    """simple docstring"""

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"help": "help message"} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )


@dataclass
class ListExample:
    """simple docstring"""

    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class RequiredExample:
    """simple docstring"""

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self ):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum )


@dataclass
class StringLiteralAnnotationExample:
    """simple docstring"""

    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto" , metadata={"help": "help message"} )
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        """simple docstring"""

        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        """simple docstring"""

        foo: int | None = None
        bar: float | None = field(default=None , metadata={"help": "help message"} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class HfArgumentParserTest( unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual( self , a , b ) -> Optional[Any]:
        """simple docstring"""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != 'container'}
            yy = {k: v for k, v in vars(y ).items() if k != 'container'}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices' , None ) and yy.get('choices' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](expected_choice ) , yy['type'](expected_choice ) )
                del xx["type"], yy["type"]

            self.assertEqual(xx , yy )
    def test_basic( self ) -> Any:
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )

        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=int , required=True )
        expected.add_argument('--bar' , type=float , required=True )
        expected.add_argument('--baz' , type=str , required=True )
        expected.add_argument('--flag' , type=string_to_bool , default=False , const=True , nargs='?' )
        self.argparsersEqual(parser , expected )

        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example , ) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
    def test_with_default( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase__ )
expected.add_argument('--baz' , default='toto' , type=lowercase__ , help='help message' )
self.argparsersEqual(lowercase__ , lowercase__ )
    def test_with_default_bool( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='?' )
expected.add_argument('--baz' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase__ , dest='baz' )
expected.add_argument('--opt' , type=lowercase__ , default=lowercase__ )
SCREAMING_SNAKE_CASE = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
    def test_with_enum( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal( self ) -> Dict:
"""simple docstring"""
@dataclass
class UpperCamelCase :
"""simple docstring"""
UpperCAmelCase_ : Literal["titi", "toto", 42] = "toto"
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
SCREAMING_SNAKE_CASE = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
    def test_with_list( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase__ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase__ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
    def test_with_optional( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase__ , type=lowercase__ )
expected.add_argument('--bar' , default=lowercase__ , type=lowercase__ , help='help message' )
expected.add_argument('--baz' , default=lowercase__ , type=lowercase__ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase__ )
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase__ )
SCREAMING_SNAKE_CASE = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
    def test_with_required( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase__ , required=lowercase__ )
expected.add_argument('--required_str' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
    def test_with_string_literal_annotation( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase__ , )
expected.add_argument('--opt' , type=lowercase__ , default=lowercase__ )
expected.add_argument('--baz' , default='toto' , type=lowercase__ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
    def test_parse_dict( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
SCREAMING_SNAKE_CASE = parser.parse_dict(lowercase__ )[0]
SCREAMING_SNAKE_CASE = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
    def test_parse_dict_extra_key( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
    def test_parse_json( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(lowercase__ , 'temp_json' )
os.mkdir(lowercase__ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(lowercase__ , lowercase__ )
            SCREAMING_SNAKE_CASE = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
SCREAMING_SNAKE_CASE = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
    def test_parse_yaml( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(lowercase__ , 'temp_yaml' )
os.mkdir(lowercase__ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
SCREAMING_SNAKE_CASE = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
    def test_integration_training_args( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
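

# ---------------------------------------------------------------------------
# End-to-end usage sketch (an addition): HfArgumentParser builds argparse
# options from a dataclass. The ScriptArguments dataclass below is ad hoc,
# defined only for this example.
# ---------------------------------------------------------------------------
def _hf_argparser_example():
    @dataclass
    class ScriptArguments:
        foo: int
        bar: float = 3.14
        baz: str = "42"
        flag: bool = False

    parser = HfArgumentParser(ScriptArguments)
    (script_args,) = parser.parse_args_into_dataclasses(["--foo", "12", "--flag"], look_for_args_file=False)
    return script_args  # ScriptArguments(foo=12, bar=3.14, baz='42', flag=True)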
| 406 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'timesformer'
    def __init__( self , image_size=2_24 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
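

# ---------------------------------------------------------------------------
# Usage sketch (an addition): instantiate the config with non-default video
# settings; unspecified fields keep the defaults above.
# ---------------------------------------------------------------------------
def _timesformer_config_example():
    config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
    return config.num_frames, config.hidden_size  # (16, 768)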
| 275 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 572 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__( self , value : int | None = None ):
        """simple docstring"""
        self.value = value
        self.prior = random()
        self.left : Node | None = None
        self.right : Node | None = None
def __repr__( self : Any ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return f"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 )
def __str__( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase : List[str] = str(self.value ) + """ """
__lowerCamelCase : Any = str(self.left or """""" )
__lowerCamelCase : List[str] = str(self.right or """""" )
return value + left + right
def __lowercase (_lowercase, _lowercase ) -> tuple[Node | None, Node | None]:
"""simple docstring"""
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
__lowerCamelCase : int = split(root.left, _lowercase )
return left, root
else:
__lowerCamelCase : Union[str, Any] = split(root.right, _lowercase )
return root, right
def __lowercase (_lowercase, _lowercase ) -> Node | None:
"""simple docstring"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__lowerCamelCase : Tuple = merge(left.right, _lowercase )
return left
else:
__lowerCamelCase : int = merge(_lowercase, right.left )
return right
def __lowercase (_lowercase, _lowercase ) -> Node | None:
"""simple docstring"""
__lowerCamelCase : List[str] = Node(_lowercase )
__lowerCamelCase : List[Any] = split(_lowercase, _lowercase )
return merge(merge(_lowercase, _lowercase ), _lowercase )
def __lowercase (_lowercase, _lowercase ) -> Node | None:
"""simple docstring"""
__lowerCamelCase : int = split(_lowercase, value - 1 )
__lowerCamelCase : List[str] = split(_lowercase, _lowercase )
return merge(_lowercase, _lowercase )
def __lowercase (_lowercase ) -> None:
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value, end=""",""" )
inorder(root.right )
def __lowercase (_lowercase, _lowercase ) -> Node | None:
"""simple docstring"""
for arg in args.split():
if arg[0] == "+":
__lowerCamelCase : Dict = insert(_lowercase, int(arg[1:] ) )
elif arg[0] == "-":
__lowerCamelCase : List[str] = erase(_lowercase, int(arg[1:] ) )
else:
print("""Unknown command""" )
return root
def __lowercase () -> None:
"""simple docstring"""
__lowerCamelCase : str = None
print(
"""enter numbers to create a tree, + value to add value into treap, """
"""- value to erase all nodes with value. 'q' to quit. """ )
__lowerCamelCase : int = input()
while args != "q":
__lowerCamelCase : List[Any] = interact_treap(_lowercase, _lowercase )
print(_lowercase )
__lowerCamelCase : List[Any] = input()
print("""good by!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
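# A minimal usage sketch of the treap above; values inserted out of order
# still come back sorted from the inorder traversal:
def _demo_treap() -> None:
    root = None
    for number in [5, 1, 9, 3]:
        root = insert(root, number)
    inorder(root)  # prints: 1,3,5,9,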
| 703 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into one processor."""

    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels

            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if 'input_ids' in labels or (isinstance(labels, list) and 'input_ids' in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                # Temporarily switch the feature extractor to mel-bin mode so it pads log-mel targets.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels

            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
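# A minimal usage sketch (kept as a comment because it fetches a checkpoint);
# the 'microsoft/speecht5_tts' name is an assumption about an available model:
#
#     from transformers import SpeechT5Processor
#
#     processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')
#     inputs = processor(text='Hello, my dog is cute.', return_tensors='pt')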
| 483 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLM-RoBERTa (slow and fast implementations)."""

    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCAmelCase__  # give the dict literal above a readable name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='xlm-roberta-base', revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3') | 90 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker that owns one value and repeatedly compare-exchanges it with its neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` with one process per element exchanging values over pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
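# A minimal single-process sketch of the same odd-even transposition idea,
# useful for checking the parallel version's output:
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        start = phase % 2  # even phases compare (0,1),(2,3)...; odd phases (1,2),(3,4)...
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr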
| 539 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 513 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token='<unk>',
        bos_token='<s>',
        eos_token='</s>',
        pad_token='<pad>',
        sep_token='[SEP]',
        mask_token='[MASK]',
        cls_token='[CLS]',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r' (\[(MASK|SEP)\])', r'\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
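# A minimal usage sketch (kept as a comment because it fetches a checkpoint):
#
#     tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
#     ids = tokenizer('BigBird uses block sparse attention.')['input_ids']
#     tokenizer.decode(ids)  # decodes back with [CLS]/[SEP] special tokens around the text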
| 513 | 1 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Visual representation of the list starting at this node."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return '->'.join(string_rep)


def make_linked_list(elements_list):
    """Create a linked list from the given sequence and return its head."""
    if not elements_list:
        raise Exception('The Elements List is empty')

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Print the list elements in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
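# A minimal alternative sketch: the same reverse printing done iteratively
# with an explicit stack, avoiding recursion-depth limits on long lists:
def print_reverse_iterative(head_node):
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())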
| 572 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Visual representation of the list starting at this node."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return '->'.join(string_rep)


def make_linked_list(elements_list):
    """Create a linked list from the given sequence and return its head."""
    if not elements_list:
        raise Exception('The Elements List is empty')

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Print the list elements in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 655 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2):
    """Approximate the surface distance in meters between two points on Earth."""
    # Flattening of the reference ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
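# A minimal usage sketch; the coordinates below are illustrative (roughly San
# Francisco and New York), and the printed distance should be on the order of
# 4.1e6 meters:
def _demo_lamberts_distance() -> None:
    san_francisco = (37.774856, -122.424227)
    new_york = (40.713019, -74.012647)
    print(lamberts_ellipsoidal_distance(*san_francisco, *new_york))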
| 81 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for an Informer time-series transformer."""

    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = 'student_t',
        loss: str = 'nll',
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = 'mean',
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = 'gelu',
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = 'prob',
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
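# A minimal sketch showing how `feature_size` falls out of the defaults above:
# lags_sequence has 7 entries, and _number_of_features adds the time features
# plus the two loc/scale features, so 1 * 7 + (1 + 2) = 10 here.
def _demo_feature_size() -> None:
    config = InformerConfig(prediction_length=24, num_time_features=1)
    print(config.feature_size)  # 10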
| 81 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError("args.model_type should be \"bert\".")

    state_dict = model.state_dict()
    compressed_sd = {}

    # Copy the embedding weights into the student (DistilBERT-style) layout.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Keep every other teacher layer (0, 2, 4, 7, 9, 11) as the student layers.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
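# A minimal invocation sketch for the extraction script above (the checkpoint
# path is illustrative):
#
#     python extract_distilbert.py \
#         --model_type bert \
#         --model_name bert-base-uncased \
#         --dump_checkpoint serialization_dir/distilbert_init.pth \
#         --vocab_transform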
| 679 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'scaled_linear']:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
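# A minimal sketch (kept as a comment because it downloads pretrained weights)
# of swapping this scheduler into an existing diffusers pipeline; the
# checkpoint name is an assumption:
#
#     from diffusers import DiffusionPipeline, EulerDiscreteScheduler
#
#     pipe = DiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)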
| 679 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a ( _UpperCAmelCase : int="" ):
'''simple docstring'''
__UpperCAmelCase : Any = tempfile.mkdtemp()
return os.path.join(lowerCAmelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__UpperCAmelCase : Optional[int] = AgentAudio(UpperCamelCase__ )
__UpperCAmelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(UpperCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
__UpperCAmelCase : Tuple = sf.read(UpperCamelCase__ )
self.assertTrue(torch.allclose(UpperCamelCase__ , torch.tensor(UpperCamelCase__ ) , atol=1e-4 ) )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__UpperCAmelCase : int = get_new_path(suffix='''.wav''' )
sf.write(UpperCamelCase__ , UpperCamelCase__ , 1_60_00 )
__UpperCAmelCase : Tuple = AgentAudio(UpperCamelCase__ )
self.assertTrue(torch.allclose(UpperCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , UpperCamelCase__ )
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = torch.randint(0 , 2_56 , (64, 64, 3) )
__UpperCAmelCase : Tuple = AgentImage(UpperCamelCase__ )
__UpperCAmelCase : Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase__ ) )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '000000039769.png'
__UpperCAmelCase : Optional[Any] = Image.open(UpperCamelCase__ )
__UpperCAmelCase : Optional[Any] = AgentImage(UpperCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase__ ) )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '000000039769.png'
__UpperCAmelCase : List[str] = Image.open(UpperCamelCase__ )
__UpperCAmelCase : str = AgentImage(UpperCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase__ ) )
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'Hey!'
__UpperCAmelCase : Union[str, Any] = AgentText(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , agent_type.to_string() )
self.assertEqual(UpperCamelCase__ , agent_type.to_raw() )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 709 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
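# For example, with revision=None the helper above yields a 'main' URL and
# percent-encodes the filename:
#
#     hf_hub_url(repo_id='org-name/dataset-name', path='filename with blanks.csv', revision=None)
#     # -> https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv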
| 241 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 77 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ConsistencyModelPipeline
lowerCAmelCase : str = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowerCAmelCase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowerCAmelCase : Any = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet"
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond"
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
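# A minimal usage sketch (an addition, not part of the test file): one-step
# sampling with the checkpoint the slow tests load. The Hub repo name and the
# sampler hyperparameters are taken from those tests; everything else here is
# an illustrative assumption, not the canonical diffusers recipe.
def _example_consistency_model_sampling():
    unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
    # Distilled consistency models can sample in a single step.
    return pipe(num_inference_steps=1, timesteps=None, output_type="np").images[0]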
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
SCREAMING_SNAKE_CASE_ :int = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
SCREAMING_SNAKE_CASE_ :Optional[int] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
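# A minimal sketch (an addition) of how the tester above is typically wired
# into a model's unittest class. The choice of BertConfig is illustrative, not
# part of the original file.
def _example_config_tester_usage():
    import unittest

    from transformers import BertConfig

    class BertConfigTest(unittest.TestCase):
        def test_config(self):
            # `parent` is the TestCase, so assertions report through unittest.
            ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()

    return BertConfigTest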
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
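# A minimal sketch (an addition) of how these arguments usually feed the
# benchmark runner; the model name and sizes are illustrative assumptions.
def _example_benchmark():
    from transformers import PyTorchBenchmark

    args = PyTorchBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[128], inference=True
    )
    return PyTorchBenchmark(args).run()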
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
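# Quick sanity check (an addition, not in the original): Project Euler 191
# notes that exactly 43 of the 3**4 = 81 possible 4-day strings earn a prize,
# which the memoised recursion reproduces:
#
#   >>> _calculate(days=4, absent=0, late=0)
#   43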
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
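# A standalone sketch (an addition) of the jit-equivalence pattern the test
# above relies on: the jitted and eager paths should agree on output shapes.
def _example_jit_equivalence():
    import jax.numpy as jnp

    @jax.jit
    def f(x):
        return (x * 2.0).sum(axis=-1)

    x = jnp.ones((2, 3))
    with jax.disable_jit():
        eager = f(x)
    jitted = f(x)
    assert eager.shape == jitted.shape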
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_error_wrong_load(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
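# A small illustrative sketch (an addition) of the offset-mapping behaviour the
# test above checks: each token maps back to a character span of the input.
# "openai/clip-vit-base-patch32" is an assumed public checkpoint name.
def _example_offset_mapping():
    tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    encoding = tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    # e.g. [(0, 5), (6, 11)]: the second token starts after the separating space.
    return encoding.offset_mapping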
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"unc-nlp/lxmert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
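# A short worked example (an addition) of what the pair-encoding helpers above
# produce. With token_ids_0 = [5, 6] and token_ids_1 = [7] the layout is
# [CLS] 5 6 [SEP] 7 [SEP], so the segment ids are [0, 0, 0, 0, 1, 1]:
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   tokenizer.build_inputs_with_special_tokens([5, 6], [7])
#   # -> [cls_id, 5, 6, sep_id, 7, sep_id]
#   tokenizer.create_token_type_ids_from_sequences([5, 6], [7])
#   # -> [0, 0, 0, 0, 1, 1]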
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
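# Example invocation (an addition, not part of the script). The --task,
# --max_seq_length, --gpus and --overwrite_cache flags are defined above; the
# remaining flags (--model_name_or_path, --data_dir, --do_train, --output_dir)
# are assumptions about what `add_generic_args` in lightning_base provides.
#
#   python run_pl_glue.py \
#     --model_name_or_path bert-base-cased \
#     --task mrpc \
#     --data_dir ./glue_data/MRPC \
#     --output_dir ./results/mrpc \
#     --do_train \
#     --gpus 1 \
#     --max_seq_length 128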
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
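    # (`LlamaForSequenceClassification` pools its logits from the last non-padding
    # position, which is why the mask above treats token id 1 as padding.)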
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
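    # Linear scaling rescales position ids for every input, so even the short prompt
    # diverges from the unscaled model; dynamic NTK scaling leaves positions inside
    # the original window untouched, which is what the branch above distinguishes.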
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
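# A minimal sketch for running this test module directly (hedged: `pytest` is the
# usual runner for the transformers test suite, and `unittest` is assumed to be
# imported near the top of the file, as the skip decorators above already rely on it):
if __name__ == "__main__":
    unittest.main()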
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCamelCase_:
pass
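# Note: the mixin's original name is not recoverable from this snippet (the
# identifier was scrambled). `require_onnxruntime` is a skip decorator from
# diffusers' testing utils, so test classes inheriting this placeholder are
# presumably skipped when onnxruntime is not installed.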
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
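# Each key above names a submodule of this package; each value lists the public
# symbols that `_LazyModule` (see the bottom of the file) will import from it on
# first attribute access.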
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_wav2vec2 import (
        WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
        Wav2Vec2ForAudioFrameClassification,
        Wav2Vec2ForCTC,
        Wav2Vec2ForMaskedLM,
        Wav2Vec2ForPreTraining,
        Wav2Vec2ForSequenceClassification,
        Wav2Vec2ForXVector,
        Wav2Vec2Model,
        Wav2Vec2PreTrainedModel,
    )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_tf_wav2vec2 import (
        TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFWav2Vec2ForCTC,
        TFWav2Vec2ForSequenceClassification,
        TFWav2Vec2Model,
        TFWav2Vec2PreTrainedModel,
    )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    # Bug fix: the flax classes live in `modeling_flax_wav2vec2`, not the TF module.
    from .modeling_flax_wav2vec2 import (
        FlaxWav2Vec2ForCTC,
        FlaxWav2Vec2ForPreTraining,
        FlaxWav2Vec2Model,
        FlaxWav2Vec2PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
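# Usage sketch (illustrative): with the `_LazyModule` swap above, a statement like
#   from transformers.models.wav2vec2 import Wav2Vec2Config
# only imports `configuration_wav2vec2` when the name is first resolved, keeping
# the top-level `import transformers` cheap.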
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
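    # e.g. a pair (A, B) is encoded as [CLS] A [SEP] B [SEP] with segment ids
    # 0...0 for the first block and 1...1 for the second, the BERT-style
    # convention that MobileBERT inherits.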
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
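# Usage sketch (illustrative; assumes the public `google/mobilebert-uncased` checkpoint):
#
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   encoding = tokenizer("hello world")  # [CLS]/[SEP] added via the methods above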
'''simple docstring'''
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
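# Worked example: partition(4) == 5, matching the five integer partitions of 4:
# 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.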
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")