| column | dtype | range |
|---|---|---|
| code | string | lengths 82 – 53.2k |
| code_codestyle | int64 | 0 – 721 |
| style_context | string | lengths 91 – 41.9k |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
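# An illustration of the lookup above (the values follow directly from LAYERS_TO_COPY
# and the fallback branch in pick_layers_to_copy; these calls are added for
# exposition and are not part of the upstream script):
#
#   pick_layers_to_copy(n_student=3, n_teacher=12)  -> [0, 6, 11]      (first / middle / last)
#   pick_layers_to_copy(n_student=5, n_teacher=12)  -> [0, 1, 2, 3, 4]
#       (no hardcoded entry: warns, then defaults to the first 5 teacher layers)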
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
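# A minimal usage sketch (illustrative, not part of the upstream script; the
# checkpoint id "facebook/bart-large-cnn" and the 3/1 layer counts are assumptions).
# CLI equivalent, assuming this file is saved as make_student.py:
#   python make_student.py facebook/bart-large-cnn --save_path student_bart_3_1 -e 3 -d 1
def _example_make_student():
    student, e_copied, d_copied = create_student_by_copying_alternating_layers(
        "facebook/bart-large-cnn", save_path="student_bart_3_1", e=3, d=1
    )
    # For a 12-layer BART teacher, the 3-layer student encoder receives teacher layers [0, 6, 11].
    return student, e_copied, d_copied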
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` independent trials,
    each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in the range (0, 1)")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
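# A worked check of the example printed above (added for exposition):
#   C(4, 2) = 4! / (2! * 2!) = 6
#   P(X = 2) = 6 * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
# which is exactly what binomial_distribution(2, 4, 0.75) returns.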
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
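# A hedged usage sketch (added; not part of this module). The checkpoint id
# "CompVis/ldm-super-resolution-4x-openimages" and the 128x128 input size are
# illustrative assumptions:
def _example_super_resolution():
    import PIL.Image

    from diffusers import LDMSuperResolutionPipeline

    pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res_img = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))  # any small RGB image
    upscaled = pipeline(low_res_img, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")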
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase = logging.getLogger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''masked_bert'''
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=0 , snake_case="topK" , snake_case="constant" , snake_case=0.0 , **snake_case , ) -> str:
super().__init__(pad_token_id=snake_case , **snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
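# A small sketch (added for exposition) showing how the pruning-specific fields sit
# alongside the standard BERT hyper-parameters:
def _example_masked_bert_config():
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    # pruning_method selects how weight masks are derived at train time (here: keep the top-K scores)
    return config.pruning_method, config.hidden_size  # ("topK", 768)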
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
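# A hedged round-trip sketch (added; not part of this module): encode a batch, then
# decode it back through the quantizer with the default configuration.
def _example_vq_roundtrip():
    model = VQModel()  # defaults: 3 latent channels, a 256-entry codebook
    x = torch.randn(1, 3, 32, 32)
    latents = model.encode(x).latents     # encoder output after quant_conv
    recon = model.decode(latents).sample  # quantize -> post_quant_conv -> decoder
    return recon.shape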
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
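# A hedged stand-in (added for exposition) for the idea behind the sys.modules swap
# above: _LazyModule subclasses ModuleType and performs the real submodule import
# only when an attribute is first accessed. This is a simplified sketch, not the
# actual transformers implementation:
#
#   import importlib, types
#
#   class TinyLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)  # the heavy import happens here, on first access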
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''', type=A_, default='''wikitext''', help='''Name of the training. Explore datasets at: hf.co/datasets.''', )
parser.add_argument(
'''--dataset_config''', type=A_, default='''wikitext-103-raw-v1''', help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''', type=A_, default='''sayakpaul/unigram-tokenizer-wikitext''', help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''', )
parser.add_argument(
'''--shard_size''', type=A_, default=10_00, help='''Number of entries to go in a single shard.''', )
parser.add_argument('''--split''', type=A_, default='''train''', choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''', default=A_, type=A_, help='''Limit the number of shards (used for debugging).''', )
parser.add_argument(
'''--max_length''', type=A_, default=5_12, help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''', )
parser.add_argument(
'''--output_dir''', default='''tf-tpu''', type=A_, help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''', )
_lowerCamelCase : int = parser.parse_args()
return args
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
def fn(A_ : str ):
return tokenizer(examples['''text'''] )
return fn
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
_lowerCamelCase : str = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
_lowerCamelCase : Union[str, Any] = tf.train.Features(feature=A_ )
_lowerCamelCase : int = tf.train.Example(features=A_ )
_lowerCamelCase : int = example.SerializeToString()
records.append(A_ )
return records
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
_lowerCamelCase : Optional[int] = min(len(A_ ), args.limit )
_lowerCamelCase : Tuple = dataset.select(range(A_ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
_lowerCamelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_lowerCamelCase : List[Any] = os.path.join(args.output_dir, args.split )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase : Any = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
_lowerCamelCase : Tuple = tokenize_function(A_ )
_lowerCamelCase : Dict = dataset.map(A_, batched=A_, num_proc=4, remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A_ : int ):
# Concatenate all texts.
_lowerCamelCase : Union[str, Any] = {k: sum(examples[k], [] ) for k in examples.keys()}
_lowerCamelCase : Any = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_lowerCamelCase : Any = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_lowerCamelCase : List[str] = {
k: [t[i : i + args.max_length] for i in range(0, A_, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_lowerCamelCase : str = dataset_tokenized.map(A_, batched=A_, batch_size=10_00, num_proc=4 )
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Optional[int] = 0
for shard in range(0, len(A_ ), args.shard_size ):
_lowerCamelCase : Tuple = grouped_dataset[shard : shard + args.shard_size]
_lowerCamelCase : Union[str, Any] = len(dataset_snapshot['''input_ids'''] )
_lowerCamelCase : List[str] = os.path.join(A_, F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
_lowerCamelCase : List[Any] = get_serialized_examples(A_ )
with tf.io.TFRecordWriter(A_ ) as out_file:
for i in range(len(A_ ) ):
_lowerCamelCase : str = serialized_examples[i]
out_file.write(A_ )
print('''Wrote file {} containing {} records'''.format(A_, A_ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''', '''w''' ) as f:
print(F'''Total {args.split} records: {total_records}''', file=A_ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
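# A hedged sketch (added; not part of the original script) of reading the shards
# back with tf.data. The feature spec mirrors get_serialized_examples above; the
# glob pattern and max_length default are assumptions:
def _example_read_shards(pattern="tf-tpu/train/dataset-*.tfrecord", max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    ds = tf.data.TFRecordDataset(tf.io.gfile.glob(pattern))
    return ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))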
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="None" , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : Optional[Any] = is_training
_lowerCamelCase : Dict = use_input_mask
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : int = relative_attention
_lowerCamelCase : Union[str, Any] = position_biased_input
_lowerCamelCase : str = pos_att_type
_lowerCamelCase : Tuple = scope
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Any = None
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[str] = DebertaVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
_lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DebertaVaForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Dict = DebertaVaForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : Tuple = DebertaVaForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : int = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = True
snake_case__ : List[Any] = False
snake_case__ : int = False
snake_case__ : Optional[Any] = False
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = DebertaVaModelTester(self )
_lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
_lowerCamelCase : List[str] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
# compare the actual values for a slice.
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
def __lowercase ( snake_case_ : list ) ->float:
'''simple docstring'''
__A : Tuple = 0
while len(snake_case_ ) > 1:
__A : List[Any] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
__A : Dict = files.index(min(snake_case_ ) )
temp += files[min_index]
files.pop(snake_case_ )
files.append(snake_case_ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
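# A short trace of the greedy strategy for [2, 3, 4] (added for exposition):
#   merge 2 + 3 -> 5, cost 5, files become [4, 5]
#   merge 4 + 5 -> 9, cost 9, files become [9]
#   optimal merge cost = 5 + 9 = 14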
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
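# Usage sketch (illustrative, not part of the original module): a RagConfig is
# normally composed from two existing sub-configs via the classmethod above; the
# model identifiers below are examples only.
#
#     from transformers import AutoConfig
#
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)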
| 720 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
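# Usage note (illustrative): this parser is normally reached through the `accelerate`
# CLI entry point rather than by executing the module directly, e.g.
#
#     accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# which walks through the interactive prompts and writes the answers to the given file.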
| 141 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    # Ordinary least squares on [1, date, match_count] features.
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    # Weekly seasonality (period 7) with the match counts as an exogenous regressor.
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
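# Quick sanity check (illustrative): three forecasts all within 0.1 of the actual
# value vote "safe", so the checker returns True.
#
#     >>> data_safety_checker([2.0, 1.95, 2.05], 2.0)
#     True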
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 45 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
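# Invocation sketch (the script path is an assumption based on the usage comment at
# the top of this file): running `python ./gen-card-allenai-wmt16.py` from the
# repository root writes one model_cards/allenai/<model_name>/README.md per model.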
| 173 | 0 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
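# For the sample dimensions above (CLRS's classic matrix-chain example), the DP
# reports a minimum of 15125 scalar multiplications and the parenthesization
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ). The tables cost O(n^2) space and the
# triple loop runs in O(n^3) time.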
if __name__ == "__main__":
main()
| 561 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
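# Reading the tables above: 10 shards across at most 3 jobs become the contiguous
# ranges [0, 4), [4, 7), [7, 10) -- as even as possible, with earlier jobs taking
# the remainder -- and mixing two list arguments of different lengths ("shards"
# vs. "shards2") is rejected with a RuntimeError.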
| 561 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
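    # Note (derived from the expectations above): a threshold of 0.9985 filters out
    # the remotes (0.9982, 0.9960) and the couch (0.9955) reported by the
    # unthresholded test, keeping only the two cat detections.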
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 25 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
    def test_attention_outputs(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
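    # Shape walk-through (illustrative): with image_size=64 and five reported stages,
    # the spatial sides checked above are 32, 16, 8, 4 and 2; the loop ends with
    # divisor == 64, so divisor // 2 == 32 matches the tester's output_stride.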
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
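# Note (illustrative): the _LazyModule indirection keeps `import transformers` cheap;
# submodules listed in _import_structure are only imported on first attribute access,
# while the TYPE_CHECKING branch above keeps static analyzers aware of the real symbols.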
| 710 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
@cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
# Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
@require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 341 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the function with a key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Mark the function with several key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects the marked key handlers onto the class."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and calls the handler for the pressed character, if one exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to the key handler."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
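# Usage sketch (illustrative; assumes the sibling keymap module defines an "up" key):
# decorate handlers with `mark`, then build the class through `register` so the
# KeyHandler metaclass collects them.
#
#     @register
#     class Menu:
#         @mark(KEYMAP["up"])
#         def move_up(cls):
#             ...
#
# `Menu.handle_input(Menu)` then dispatches a single keypress to the matching handler.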
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self, image: np.ndarray, size: Dict[str, int], crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
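    # Example (illustrative): with the default crop_pct of 224 / 256 = 0.875 and
    # shortest_edge=224, the image is first resized so its short side is
    # int(224 / 0.875) = 256, then center-cropped to 224x224 -- the standard
    # "resize to 256, crop to 224" ImageNet evaluation recipe.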
    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None,
        resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None,
        do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 662 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
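# Note (illustrative): an incoming-webhook URL has the shape
# https://hooks.slack.com/services/T000/B000/XXXX and accepts a JSON payload of the
# form {"text": "..."}; any non-200 response is surfaced as a ValueError above.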
| 23 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
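        # Note: `loss` above is the mean cross-entropy per target token, so multiplying
        # by the number of target tokens and negating converts it into the sequence
        # log-likelihood reported by the original T5/MTF codebase. Illustrative
        # arithmetic only (numbers invented, not from the real model):
        #   per_token_loss, num_target_tokens = 21.23, 4
        #   mtf_style_score = -(num_target_tokens * per_token_loss)  # -84.92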
| 23 | 1 |
from PIL import Image
def change_brightness(img, level):
    def brightness(c) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 112 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PILImageResampling.BICUBIC,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        do_convert_rgb = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample = PILImageResampling.BICUBIC, data_format = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        do_convert_rgb = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
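# Usage sketch (hedged: the class name above was restored from transformers'
# BlipImageProcessor, which the OPENAI_CLIP defaults and 384x384 size match; adjust
# the name if your local copy differs).
# from PIL import Image
# processor = BlipImageProcessor()
# batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 3, 384, 384)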
| 112 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image = None,
        batch_size = 1,
        num_inference_steps = 100,
        eta = 0.0,
        generator = None,
        output_type = "pil",
        return_dict = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
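# Usage sketch (hedged: the class name was restored from diffusers'
# LDMSuperResolutionPipeline, which this file mirrors, and the checkpoint below is the
# one it is normally paired with -- substitute your own if it differs).
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
# upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
# upscaled.save("upscaled.png")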
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
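# Usage sketch (hedged: this class backs transformers' "zero-shot-classification"
# pipeline task, which is the usual entry point rather than direct construction).
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier(
#     "I love this movie",
#     candidate_labels=["positive", "negative"],
#     multi_label=False,
# )  # -> {"sequence": ..., "labels": [...], "scores": [...]}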
| 672 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
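# Usage sketch for the builder defined below (hedged: packaged builders like this one
# are normally reached through `datasets.load_dataset` rather than instantiated directly).
# from datasets import load_dataset
# ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
# print(ds["train"].features)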
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | 249 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(Rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='') | 249 | 1 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list of all possible combinations of substrings from
    word_bank that can be concatenated to construct the target string
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
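# Worked example of the table for all_construct("abc", ["ab", "c", "abc"]):
#   table[0] = [[]]                       (seed: the empty prefix)
#   "ab" matches at i=0  -> table[2] = [["ab"]]
#   "abc" matches at i=0 -> table[3] = [["abc"]]
#   "c" matches at i=2   -> table[3] += [["c", "ab"]]
# after reversing each combination, the result is [["abc"], ["ab", "c"]].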
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 716 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortized monthly payment on a loan.
    """
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''')
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('''Years to repay must be an integer > 0''')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
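# Worked example of the formula above: principal = 25_000, rate_per_annum = 0.12,
# years_to_repay = 3 gives rate_per_month = 0.01 and number_of_payments = 36, so
#   25_000 * 0.01 * 1.01**36 / (1.01**36 - 1) ≈ 830.36 per month.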
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 0 |
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
| 377 |
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 377 | 1 |
"""simple docstring"""
def topological_sort(graph):
    """Perform topological sorting (Kahn's algorithm) of the given graph."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
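# For the adjacency list above the expected output is [0, 1, 2, 3, 4, 5]: vertex 0 is
# the only zero-indegree node, removing it frees 1 and 2, then 3, then 4 and 5.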
| 716 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
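# Usage sketch (hedged: like the other packaged builders, this one is normally driven
# through `datasets.load_dataset`).
# from datasets import load_dataset
# ds = load_dataset("json", data_files="data/train.jsonl")              # one object per line
# ds = load_dataset("json", data_files="data/blob.json", field="data")  # records nested under a field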
| 272 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'''width_multiplier''' ) )
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=64 ,_lowerCamelCase=2 ,_lowerCamelCase=3 ,_lowerCamelCase="swish" ,_lowerCamelCase=3 ,_lowerCamelCase=32 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=10 ,_lowerCamelCase=None ,_lowerCamelCase=0.2_5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0 ,) -> Any:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = make_divisible(512 * width_multiplier ,divisor=8 )
__lowercase = hidden_act
__lowercase = conv_kernel_size
__lowercase = output_stride
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
__lowercase = width_multiplier
__lowercase = ffn_dropout
__lowercase = attn_dropout
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = MobileViTVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTVaForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
__lowercase = model(_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a : str = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : Any = False
a : Tuple = False
a : Dict = False
a : str = False
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = MobileViTVaModelTester(self )
__lowercase = MobileViTVaConfigTester(self ,config_class=_lowerCamelCase ,has_text_modality=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ):
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 5
self.assertEqual(len(_lowerCamelCase ) ,_lowerCamelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase = 2
for i in range(len(_lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileViTVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _lowerCAmelCase ( ):
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase ,return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_lowerCamelCase )
__lowercase = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCamelCase ,atol=1E-4 ) )
@slow
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase = model.to(_lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase ,return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
__lowercase = outputs.logits
# verify the logits
__lowercase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,_lowerCamelCase )
__lowercase = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] ,device=_lowerCamelCase ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_lowerCamelCase ,atol=1E-4 ) )
@slow
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase = model.to(_lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase ,return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
__lowercase = outputs.logits.detach().cpu()
__lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase ,target_sizes=[(50, 60)] )
__lowercase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,_lowerCamelCase )
__lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
__lowercase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,_lowerCamelCase )
| 502 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=7 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=99 ,_lowerCamelCase=64 ,_lowerCamelCase=5 ,_lowerCamelCase=4 ,_lowerCamelCase=37 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=16 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=3 ,_lowerCamelCase=4 ,_lowerCamelCase=None ,) -> Dict:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = vocab_size - 1
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCamelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = GPTNeoXModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = True
__lowercase = GPTNeoXModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForQuestionAnswering(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForTokenClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = True
__lowercase = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,use_cache=_lowerCamelCase )
__lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] ,dim=-1 )
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,output_hidden_states=_lowerCamelCase )
__lowercase = output_from_no_past['''hidden_states'''][0]
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,past_key_values=_lowerCamelCase ,output_hidden_states=_lowerCamelCase ,)['''hidden_states'''][0]
# select random slice
__lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Dict = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
a : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
a : Dict = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Dict = False
a : Optional[Any] = False
a : Tuple = False
a : List[Any] = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = GPTNeoXModelTester(self )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase ,hidden_size=64 ,num_attention_heads=8 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ids_tensor([1, 10] ,config.vocab_size )
__lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = GPTNeoXModel(_lowerCamelCase )
original_model.to(_lowerCamelCase )
original_model.eval()
__lowercase = original_model(_lowerCamelCase ).last_hidden_state
__lowercase = original_model(_lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = {'''type''': scaling_type, '''factor''': 10.0}
__lowercase = GPTNeoXModel(_lowerCamelCase )
scaled_model.to(_lowerCamelCase )
scaled_model.eval()
__lowercase = scaled_model(_lowerCamelCase ).last_hidden_state
__lowercase = scaled_model(_lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-5 ) )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
__lowercase = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_lowerCamelCase )
__lowercase = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_lowerCamelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__lowercase = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
__lowercase = model.generate(**_lowerCamelCase ,do_sample=_lowerCamelCase ,max_new_tokens=20 )
__lowercase = tokenizer.batch_decode(_lowerCamelCase )[0]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
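# Hedged standalone sketch (not part of the test suite): reproduces the greedy
# decoding path exercised by the integration test above. The checkpoint name,
# prompt, and 20-token budget come from the test; the rest is standard
# transformers API usage.
if __name__ == "__main__":
    from transformers import AutoTokenizer, GPTNeoXForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
    model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    # Greedy decoding (do_sample=False), mirroring the expected-output check.
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
    print(tokenizer.batch_decode(output_ids)[0])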
| 502 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase_ : List[Any] = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
snake_case__ : Dict = """masked_bert"""
def __init__( self : List[str] , __lowerCamelCase : Optional[Any]=30_522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : List[Any]=3_072 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : str=1E-12 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[int]="topK" , __lowerCamelCase : Optional[Any]="constant" , __lowerCamelCase : int=0.0 , **__lowerCamelCase : Tuple , ):
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase :List[str] = vocab_size
UpperCamelCase :List[Any] = hidden_size
UpperCamelCase :str = num_hidden_layers
UpperCamelCase :Optional[int] = num_attention_heads
UpperCamelCase :str = hidden_act
UpperCamelCase :Union[str, Any] = intermediate_size
UpperCamelCase :Optional[Any] = hidden_dropout_prob
UpperCamelCase :Any = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = max_position_embeddings
UpperCamelCase :List[Any] = type_vocab_size
UpperCamelCase :Optional[Any] = initializer_range
UpperCamelCase :Optional[int] = layer_norm_eps
UpperCamelCase :List[str] = pruning_method
UpperCamelCase :Dict = mask_init
UpperCamelCase :Dict = mask_scale
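# Hedged usage sketch: as a PretrainedConfig subclass, the config above can be
# instantiated with its defaults and round-tripped through JSON. The directory
# name below is a placeholder.
if __name__ == "__main__":
    config = _SCREAMING_SNAKE_CASE()
    config.save_pretrained("masked_bert_config")  # writes config.json into the folder
    reloaded = _SCREAMING_SNAKE_CASE.from_pretrained("masked_bert_config")
    print(reloaded.to_json_string())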
| 711 |
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] ) -> float:
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
UpperCamelCase :List[Any] = sum(__magic_name__ ) / len(__magic_name__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
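    # Hedged example run: the mean of [1, 2, 3, 4] is 2.5, so the mean absolute
    # deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 == 1.0.
    print(SCREAMING_SNAKE_CASE_([1, 2, 3, 4]))  # 1.0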
| 590 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self : Dict ):
A__ : Any =AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
A__ : List[str] =AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(lowercase__ )
from datasets import load_dataset
A__ : Union[str, Any] =load_dataset("nielsr/rvlcdip-demo" )
A__ : int =dataset['train'][0]['image'].convert("RGB" )
A__ : Tuple =image_processor(lowercase__ , return_tensors="pt" ).to(lowercase__ )
# forward pass
with torch.no_grad():
A__ : Optional[Any] =model(**lowercase__ )
A__ : Optional[Any] =outputs.logits
A__ : str =torch.Size((1, 16) )
self.assertEqual(logits.shape , lowercase__ )
A__ : int =torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=lowercase__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ) )
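        # Hedged follow-up sketch (assumes the checkpoint's config carries an
        # id2label mapping, as RVL-CDIP fine-tuned checkpoints do): map the
        # 16-way logits to a human-readable label.
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])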
| 656 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_lowerCAmelCase :Any = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_lowerCAmelCase :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
_lowerCAmelCase :Optional[int] = [file for file in filepaths if """ """ in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
_lowerCAmelCase :List[str] = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
_lowerCAmelCase :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
_lowerCAmelCase :str = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 251 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = XLMProphetNetTokenizer
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = True
def lowerCAmelCase ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase: Tuple = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase: Any = '''[PAD]'''
_UpperCamelCase: Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_lowercase ) , 1_012 )
def lowerCAmelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
_UpperCamelCase: Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase: Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase: Tuple = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
_UpperCamelCase: int = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCamelCase: List[str] = '''Hello World!'''
_UpperCamelCase: str = [35_389, 6_672, 49, 2]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: List[str] = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 713 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __magic_name__ ( PipelineTool ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = '''philschmid/bart-large-cnn-samsum'''
lowerCAmelCase : Any = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCAmelCase : Any = '''summarizer'''
lowerCAmelCase : Tuple = AutoTokenizer
lowerCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM
lowerCAmelCase : Union[str, Any] = ['''text''']
lowerCAmelCase : Dict = ['''text''']
def lowerCAmelCase ( self : str , _lowercase : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor(_lowercase , return_tensors='''pt''' , truncation=_lowercase )
def lowerCAmelCase ( self : List[Any] , _lowercase : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**_lowercase )[0]
def lowerCAmelCase ( self : int , _lowercase : List[str] ):
"""simple docstring"""
return self.pre_processor.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
| 264 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
lowercase__ = MaskFormerConfig(backbone_config=SCREAMING_SNAKE_CASE )
lowercase__ = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
lowercase__ = 8_47
lowercase__ = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
lowercase__ = 1_50
lowercase__ = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
lowercase__ = 1_71
lowercase__ = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
lowercase__ = 1_33
lowercase__ = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
lowercase__ = 19
lowercase__ = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
lowercase__ = 65
lowercase__ = '''mapillary-vistas-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
return config
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = dct.pop(SCREAMING_SNAKE_CASE )
lowercase__ = val
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
lowercase__ = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:dim, :]
lowercase__ = in_proj_bias[:dim]
lowercase__ = in_proj_weight[dim : dim * 2, :]
lowercase__ = in_proj_bias[dim : dim * 2]
lowercase__ = in_proj_weight[-dim:, :]
lowercase__ = in_proj_bias[-dim:]
# fmt: on
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:hidden_size, :]
lowercase__ = in_proj_bias[:hidden_size]
lowercase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
lowercase__ = in_proj_bias[hidden_size : hidden_size * 2]
lowercase__ = in_proj_weight[-hidden_size:, :]
lowercase__ = in_proj_bias[-hidden_size:]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:hidden_size, :]
lowercase__ = in_proj_bias[:hidden_size]
lowercase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
lowercase__ = in_proj_bias[hidden_size : hidden_size * 2]
lowercase__ = in_proj_weight[-hidden_size:, :]
lowercase__ = in_proj_bias[-hidden_size:]
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
lowercase__ = get_maskformer_config(SCREAMING_SNAKE_CASE )
# load original state_dict
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
lowercase__ = pickle.load(SCREAMING_SNAKE_CASE )
lowercase__ = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_swin_q_k_v(SCREAMING_SNAKE_CASE , config.backbone_config )
read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# update to torch tensors
for key, value in state_dict.items():
lowercase__ = torch.from_numpy(SCREAMING_SNAKE_CASE )
# load 🤗 model
lowercase__ = MaskFormerForInstanceSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
for name, param in model.named_parameters():
print(SCREAMING_SNAKE_CASE , param.shape )
lowercase__ , lowercase__ = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(SCREAMING_SNAKE_CASE ) == 0, f'Unexpected keys: {unexpected_keys}'
# verify results
lowercase__ = prepare_img()
if "vistas" in model_name:
lowercase__ = 65
elif "cityscapes" in model_name:
lowercase__ = 6_55_35
else:
lowercase__ = 2_55
lowercase__ = True if '''ade''' in model_name else False
lowercase__ = MaskFormerImageProcessor(ignore_index=SCREAMING_SNAKE_CASE , reduce_labels=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowercase__ = model(**SCREAMING_SNAKE_CASE )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowercase__ = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
image_processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
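# Hedged invocation sketch (the script file name and all paths below are
# placeholders; only the flags are defined by the parser above):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub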
| 43 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def A ( _lowerCamelCase , _lowerCamelCase = 16 ):
'''simple docstring'''
_lowerCAmelCase : int = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCAmelCase : List[Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : List[str] = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : Optional[int] = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : str = 8
else:
_lowerCAmelCase : int = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
_lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
_lowerCAmelCase : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase ) == "1":
_lowerCAmelCase : str = 2
# New Code #
_lowerCAmelCase : Optional[Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
_lowerCAmelCase : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Union[str, Any] = config["lr"]
_lowerCAmelCase : List[Any] = int(config["num_epochs"] )
_lowerCAmelCase : str = int(config["seed"] )
_lowerCAmelCase : str = int(config["batch_size"] )
_lowerCAmelCase : int = evaluate.load("glue" , "mrpc" )
set_seed(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Any = get_dataloaders(_lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : Dict = AdamW(params=model.parameters() , lr=_lowerCamelCase )
# Instantiate scheduler
_lowerCAmelCase : Any = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = model(**_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = output.loss
accelerator.backward(_lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : str = model(**_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
_lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCamelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_lowerCAmelCase : str = parser.parse_args()
_lowerCAmelCase : List[Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 500 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ : List[str] = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowerCAmelCase (__A , __A , __A , __A , __A , __A):
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''')
if not ops[op](version.parse(_lowerCamelCase) , version.parse(_lowerCamelCase)):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''')
def lowerCAmelCase (__A , __A = None):
"""simple docstring"""
_a = F'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , _lowerCamelCase):
_a = requirement, None, None
else:
_a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , _lowerCamelCase)
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''')
_a = match[0]
_a = want_full.split(''',''') # there could be multiple requirements
_a = {}
for w in want_range:
_a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , _lowerCamelCase)
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''')
_a = match[0]
_a = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys())}, but got {op}''')
# special case
if pkg == "python":
_a = """.""".join([str(_lowerCamelCase) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return
# check if any version is installed
try:
_a = importlib.metadata.version(_lowerCamelCase)
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''')
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(_lowerCamelCase , _lowerCamelCase)
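# Hedged usage sketch of the checker defined above (`require_version` is the
# name the module's own bodies use for the main entry point): specifiers are
# pip-style, optionally a comma-separated range, and "python" is special-cased
# against the running interpreter. A mismatch raises with the hint appended.
if __name__ == "__main__":
    require_version("packaging>=20.0", hint="pip install -U packaging")
    require_version("python>=3.7,<4.0")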
| 719 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase_ = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class __A ( PipelineTool ):
'''simple docstring'''
__lowerCamelCase : Dict = 'facebook/nllb-200-distilled-600M'
__lowerCamelCase : Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__lowerCamelCase : Optional[int] = 'translator'
__lowerCamelCase : int = AutoTokenizer
__lowerCamelCase : List[Any] = AutoModelForSeqaSeqLM
__lowerCamelCase : int = LANGUAGE_CODES
__lowerCamelCase : Tuple = ['text', 'text', 'text']
__lowerCamelCase : Optional[Any] = ['text']
def a__ (self , A , A , A ) -> List[str]:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
_a = self.lang_to_code[src_lang]
_a = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
A , return_tensors='''pt''' , src_lang=A , tgt_lang=A )
def a__ (self , A ) -> Optional[Any]:
"""simple docstring"""
return self.model.generate(**A )
def a__ (self , A ) -> List[str]:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=A )
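# Hedged usage sketch (the checkpoint is downloaded lazily on first call, and
# the exact call convention follows the PipelineTool base class): plain-English
# language names are resolved through LANGUAGE_CODES before hitting the model.
if __name__ == "__main__":
    translator = __A()
    print(translator("How are you?", "English", "French"))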
| 352 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
if gpta_config_file == "":
lowerCamelCase__ = GPTaConfig()
else:
lowerCamelCase__ = GPTaConfig.from_json_file(__snake_case )
lowerCamelCase__ = GPTaModel(__snake_case )
# Load weights from numpy
load_tf_weights_in_gpta(__snake_case ,__snake_case ,__snake_case )
# Save pytorch-model
lowerCamelCase__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCamelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() ,__snake_case )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_a = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
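# Hedged invocation sketch (the script file name and all paths below are
# placeholders; only the flags are defined by the parser above):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path ./gpt2-pytorch
#
# --gpt2_config_file is optional; when omitted, a stock GPTaConfig is used.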
| 481 |
from math import factorial
_a = {str(digit): factorial(digit) for digit in range(10)}
def lowerCAmelCase__(__snake_case ) -> int:
    '''Return the sum of the factorials of the digits of the given number,
    e.g. 145 -> 1! + 4! + 5! = 145 (145 maps to itself).'''
if not isinstance(__snake_case ,__snake_case ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__snake_case ) )
def lowerCAmelCase__(__snake_case = 60 ,__snake_case = 1000000 ) -> int:
    '''Count how many starting numbers below number_limit produce a digit-factorial
    chain with exactly chain_length non-repeating terms (Project Euler 74).'''
if not isinstance(__snake_case ,__snake_case ) or not isinstance(__snake_case ,__snake_case ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
lowerCamelCase__ = 0
# the cached sizes of the previous chains
lowerCamelCase__ = {}
for start_chain_element in range(1 ,__snake_case ):
# The temporary set will contain the elements of the chain
lowerCamelCase__ = set()
lowerCamelCase__ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCamelCase__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__snake_case )
chain_set_length += 1
lowerCamelCase__ = digit_factorial_sum(__snake_case )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase__ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 481 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the imported helper's actual keyword argument.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the artifacts of the last completed scheduled (daily) CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
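

# Illustrative usage (the artifact name and the GITHUB_TOKEN environment
# variable are assumptions, not part of this module):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="previous_ci_reports",
#       token=os.environ["GITHUB_TOKEN"],
#   )
#   print(sorted(reports))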
| 325 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_A = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
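

# Illustrative sketch (not part of the original class): with a local
# dummy_data.zip laid out for a dataset, `download_and_extract` maps a remote
# URL onto the matching dummy file. The dataset name and URL below are
# made-up placeholders.
#
#   dl_manager = MockDownloadManager(
#       "my_dataset", config=None, version="1.0.0", use_local_dummy_data=True
#   )
#   local_csv = dl_manager.download_and_extract("https://example.com/data/train.csv")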
| 325 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    Configuration class to store the configuration of a NAT (Neighborhood Attention Transformer)
    model, defining the model architecture.
    """

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
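

# Minimal usage sketch (illustrative, not part of the original module): with
# the defaults above, hidden_size is embed_dim * 2 ** (num_stages - 1) = 64 * 8 = 512.
def _demo_nat_config() -> None:
    config = NatConfig()
    assert config.hidden_size == 512
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]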
| 97 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only record leaf modules (or conv/batchnorm layers) in forward order
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
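

# Illustrative toy check (not from the original script): ModuleTransfer copies
# weights between two architecturally identical modules by tracing their leaf
# layers in forward order.
def _demo_module_transfer() -> None:
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 8, 8))
    assert torch.allclose(src[0].weight, dest[0].weight)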
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
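
    # Example invocation (illustrative; the script filename is an assumption):
    #   python convert_resnet_to_pytorch.py --model_name resnet50 \
    #       --pytorch_dump_folder_path ./converted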
| 653 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __magic_name__ :
def __init__( self : List[str] , snake_case__ : str , snake_case__ : Any=9_9 , snake_case__ : List[str]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[int]=9 , snake_case__ : Tuple=True , snake_case__ : int=True , snake_case__ : Tuple=False , snake_case__ : List[str]=3_2 , snake_case__ : Union[str, Any]=5 , snake_case__ : str=4 , snake_case__ : List[Any]=3_7 , snake_case__ : List[Any]=8 , snake_case__ : Any=0.1 , snake_case__ : Optional[Any]=0.0_02 , snake_case__ : Dict=1 , snake_case__ : str=0 , snake_case__ : str=0 , snake_case__ : str=None , snake_case__ : List[Any]=None , ):
'''simple docstring'''
lowercase :Any = parent
lowercase :Optional[Any] = batch_size
lowercase :Optional[int] = encoder_seq_length
lowercase :List[str] = decoder_seq_length
# For common tests
lowercase :Tuple = self.decoder_seq_length
lowercase :List[str] = is_training
lowercase :Dict = use_attention_mask
lowercase :Tuple = use_labels
lowercase :Tuple = vocab_size
lowercase :int = hidden_size
lowercase :Union[str, Any] = num_hidden_layers
lowercase :str = num_attention_heads
lowercase :str = d_ff
lowercase :Dict = relative_attention_num_buckets
lowercase :int = dropout_rate
lowercase :Dict = initializer_factor
lowercase :Optional[int] = eos_token_id
lowercase :Any = pad_token_id
lowercase :Dict = decoder_start_token_id
lowercase :Union[str, Any] = None
lowercase :List[str] = decoder_layers
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
def __snake_case ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Dict=None , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Dict=None , snake_case__ : Tuple=None , ):
'''simple docstring'''
if attention_mask is None:
lowercase :Dict = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowercase :List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowercase :List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case__ )
if decoder_head_mask is None:
lowercase :List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case__ )
if cross_attn_head_mask is None:
lowercase :Tuple = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=snake_case__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowercase :Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase :Any = input_ids.clamp(self.pad_token_id + 1 )
lowercase :Dict = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowercase :str = self.get_config()
lowercase :Dict = config.num_attention_heads
lowercase :Tuple = self.prepare_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, input_dict
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase , lowercase :int = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __snake_case ( self : str ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __snake_case ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
'''simple docstring'''
lowercase :int = UMTaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase :Union[str, Any] = model(
input_ids=snake_case__ , decoder_input_ids=snake_case__ , attention_mask=snake_case__ , decoder_attention_mask=snake_case__ , )
lowercase :Dict = model(input_ids=snake_case__ , decoder_input_ids=snake_case__ )
lowercase :Optional[Any] = result.last_hidden_state
lowercase :List[Any] = result.past_key_values
lowercase :List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(snake_case__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __snake_case ( self : Dict , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : str , ):
'''simple docstring'''
lowercase :Dict = UMTaModel(config=snake_case__ ).get_decoder().to(snake_case__ ).eval()
# first forward pass
lowercase :int = model(snake_case__ , use_cache=snake_case__ )
lowercase :Optional[int] = model(snake_case__ )
lowercase :Optional[int] = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
lowercase , lowercase :Optional[int] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowercase :List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowercase :Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase :Tuple = model(snake_case__ )['''last_hidden_state''']
lowercase :Tuple = model(snake_case__ , past_key_values=snake_case__ )['''last_hidden_state''']
# select random slice
lowercase :Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase :Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
lowercase :Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def __snake_case ( self : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , ):
'''simple docstring'''
lowercase :Dict = UMTaModel(config=snake_case__ ).to(snake_case__ ).half().eval()
lowercase :Dict = model(**snake_case__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(snake_case__ ).any().item() )
@require_torch
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : int = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__A : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__A : List[str] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__A : str = True
__A : Any = False
__A : int = False
__A : Union[str, Any] = True
__A : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__A : Any = [0.8, 0.9]
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :str = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
lowercase :int = UMTaModel(config_and_inputs[0] ).to(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
snake_case__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=snake_case__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*snake_case__ )
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Any = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
lowercase :Dict = config_and_inputs[0]
lowercase :Optional[int] = UMTaForConditionalGeneration(snake_case__ ).eval()
model.to(snake_case__ )
lowercase :int = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=snake_case__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case__ ),
}
for attn_name, (name, mask) in zip(snake_case__ , head_masking.items() ):
lowercase :Dict = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowercase :Optional[int] = torch.ones(
config.num_decoder_layers , config.num_heads , device=snake_case__ )
lowercase :Union[str, Any] = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=snake_case__ , return_dict_in_generate=snake_case__ , **snake_case__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowercase :Optional[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __snake_case ( self : Dict ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :Optional[int] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=snake_case__ ).to(snake_case__ )
lowercase :Any = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=snake_case__ , legacy=snake_case__ )
lowercase :Union[str, Any] = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
lowercase :List[Any] = tokenizer(snake_case__ , return_tensors='''pt''' , padding=snake_case__ ).input_ids
# fmt: off
lowercase :Any = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(snake_case__ , snake_case__ )
lowercase :Any = model.generate(input_ids.to(snake_case__ ) )
lowercase :Any = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
lowercase :int = tokenizer.batch_decode(snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
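

# Illustrative standalone usage of the checkpoint exercised above (the Hub id
# matches the tests; the prompt text is an arbitrary assumption):
#
#   tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
#   model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small")
#   input_ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
#   print(tokenizer.batch_decode(model.generate(input_ids)))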
| 475 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
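

# Shape note (illustrative): with two down blocks the encoder downsamples
# once, so the 32x32 dummy sample yields 16x16 latents with latent_channels=4.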
| 475 | 1 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 660 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''
    Approximates the arc length of `fnc` between `x_start` and `x_end`,
    using `steps` straight-line segments.
    '''
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
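

# Quick illustrative check (not part of the original module): the
# approximation is exact for straight lines, e.g. f(x) = x over [0, 1]
# has length sqrt(2) at any step count.
def _demo_line_length() -> None:
    assert math.isclose(line_length(lambda x: x, 0, 1, 10), math.sqrt(2))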
if __name__ == "__main__":
def lowerCamelCase__ ( A_ ):
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
__snake_case : List[Any] = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 660 | 1 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
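

# Note (illustrative): in the tests below the "ground truth" model calls
# step_model(..., do_backward=False), i.e. manual backward passes of
# loss / gradient_accumulation_steps, which should accumulate the same
# gradients as accelerator.backward(loss) inside accelerator.accumulate().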
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 667 |
'''Check whether an integer is a palindrome by reversing its digits.'''


def is_palindrome(num: int) -> bool:
    '''
    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    '''
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
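

# Equivalent formulation for comparison (illustrative, not part of the
# original module): string reversal gives the same answer for any integer.
def _is_palindrome_str(num: int) -> bool:
    return num >= 0 and str(num) == str(num)[::-1]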
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 667 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    main()
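
# Typical invocation once installed (illustrative): print environment info
# via the command registered above.
#   diffusers-cli env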
| 178 |
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
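

# Note (illustrative): the loops above follow the general diffusers scheduler
# calling convention — scale_model_input() must wrap the sample before every
# model call, and step() then consumes the raw model output for that timestep.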
| 178 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for `target` in array[left:right]; returns its index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of `target` or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns the index of `target` or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
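

# Quick illustrative check (not part of the original module): both variants
# agree on a sorted list; results are indices, -1 when the target is absent.
def _demo_ternary_search() -> None:
    data = list(range(0, 100, 3))  # 0, 3, ..., 99
    assert ite_ternary_search(data, 27) == 9
    assert rec_ternary_search(0, len(data) - 1, data, 27) == 9
    assert ite_ternary_search(data, 28) == -1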
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 510 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 'blip_vision_model'
def __init__( self, A_=768, A_=3072, A_=512, A_=12, A_=12, A_=384, A_=16, A_="gelu", A_=1E-5, A_=0.0, A_=1E-10, **A_, ) -> Dict:
super().__init__(**A_ )
UpperCAmelCase__ =hidden_size
UpperCAmelCase__ =intermediate_size
UpperCAmelCase__ =projection_dim
UpperCAmelCase__ =num_hidden_layers
UpperCAmelCase__ =num_attention_heads
UpperCAmelCase__ =patch_size
UpperCAmelCase__ =image_size
UpperCAmelCase__ =initializer_range
UpperCAmelCase__ =attention_dropout
UpperCAmelCase__ =layer_norm_eps
UpperCAmelCase__ =hidden_act
@classmethod
def __UpperCAmelCase ( cls, A_, **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
UpperCAmelCase__ , UpperCAmelCase__ =cls.get_config_dict(A_, **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase__ =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A_, **A_ )
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self, A_=None, A_=None, A_=512, A_=2.65_92, A_=256, **A_, ) -> str:
super().__init__(**A_ )
if text_config is None:
UpperCAmelCase__ ={}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase__ ={}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
UpperCAmelCase__ =BlipTextConfig(**A_ )
UpperCAmelCase__ =BlipVisionConfig(**A_ )
UpperCAmelCase__ =self.vision_config.hidden_size
UpperCAmelCase__ =projection_dim
UpperCAmelCase__ =logit_scale_init_value
UpperCAmelCase__ =1.0
UpperCAmelCase__ =0.02
UpperCAmelCase__ =image_text_hidden_size
@classmethod
def __UpperCAmelCase ( cls, A_, A_, **A_ ) -> Tuple:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **A_ )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =copy.deepcopy(self.__dict__ )
UpperCAmelCase__ =self.text_config.to_dict()
UpperCAmelCase__ =self.vision_config.to_dict()
UpperCAmelCase__ =self.__class__.model_type
return output
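

# Minimal usage sketch (added for illustration; mirrors the classmethods above):
if __name__ == "__main__":
    text_config = BlipTextConfig(vocab_size=30524)
    vision_config = BlipVisionConfig(image_size=384)
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    print(config.to_dict()["projection_dim"])  # 512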
| 510 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 142 |
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units via their metre exponents;
    e.g. kilometre -> metre multiplies by 10 ** (3 - 0)."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
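    # Illustrative checks (added for this write-up, not in the original):
    assert length_conversion(4, "kilometer", "meter") == 4_000.0
    assert length_conversion(1, "meter", "kilometer") == 0.001
| 142 | 1 |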
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
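    # Example invocations (illustrative; path assumed from the constant above):
    #   python utils/sort_auto_mappings.py               # rewrite the mappings in place
    #   python utils/sort_auto_mappings.py --check_only  # only report files that need sorting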
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
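
# Usage note (illustrative): with the lazy module installed in sys.modules,
# `from transformers.models.sew import SEWModel` resolves the attribute on
# first access, so torch is only imported once a model class is actually needed.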
| 115 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to scale image embeddings
    to and from a normalized space."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
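

# Round-trip sketch (added for illustration): scale() followed by unscale()
# should be the identity up to floating-point error.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    embeds = torch.randn(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)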
| 94 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
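

def query_index(question: str, dataset, k: int = 5):
    """Illustrative retrieval helper (added for this write-up, not part of the
    original script; the DPR question-encoder checkpoint name is an assumption):
    embed a question and return the top-k passages from the index built by main()."""
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base").to(device=device)
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    inputs = q_tokenizer(question, return_tensors="pt").to(device=device)
    question_embedding = q_encoder(**inputs).pooler_output[0].detach().cpu().numpy()
    scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=k)
    return list(zip(scores, examples["text"]))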
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 236 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 313 |
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    """Fourth-order Runge-Kutta: integrate y' = f(x, y) from (xa, ya) to x_end
    in steps of h and return the array of y values."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
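    # Illustrative check (added here, not in the original): y' = y with
    # y(0) = 1 integrated to x = 1 should approximate e ≈ 2.71828.
    print(runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])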
| 313 | 1 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y) from
    (xa, ya) up to x_end with step size h; returns the array of y values."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel(s)
        :param size_p1: pooling size
        :param bp_num1: units number of the flatten layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save the model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # restore a model saved by save_model above
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        # (note: calls _expand_mat; the original referenced a missing Expand_Mat helper)
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expand three-dimensional data to a one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix to a one-dimensional (1, n) array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the data slice of the pooling layer;
        # pd_pool: list of matrices, out_map: feature maps of shape (size_map, size_map)
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )

                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def produce(self, datas_test):
        # model inference
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the image data after the convolution process so it can be inspected
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)

        return data_conved1, data_pooled1
if __name__ == "__main__":
    pass
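    # Minimal smoke-test sketch (illustrative, not from the original): with a
    # 3x3 kernel (1 map, stride 1) and 2x2 pooling, a 6x6 input flattens to
    # 1 * 2 * 2 = 4 units, matching bp_num1=4 below.
    # cnn = CNN([3, 1, 1], 2, 4, 3, 2)
    # print(cnn.produce([np.random.rand(6, 6)]))
| 708 |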
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 692 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node (uses O(n) extra space)."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
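

def has_loop_floyd(head: Node) -> bool:
    """Alternative check added for illustration (not in the original module):
    Floyd's tortoise-and-hare detects a cycle in O(n) time and O(1) space,
    versus the O(n^2) visited-list scan used by ``has_loop`` above."""
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False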
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 178 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
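

# Usage sketch (added for illustration): configure which backbone stages are exposed.
if __name__ == "__main__":
    config = FocalNetConfig(out_features=["stage1", "stage4"])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']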
| 453 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
        }
| 191 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    # normalize the input into a batch: a list of videos, each video a list of frames
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
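# Illustrative examples of the normalization above (kept as comments; `frame`
# stands for any object accepted by `is_valid_image`, e.g. a numpy array):
#
#   frame = np.zeros((16, 16, 3), dtype=np.uint8)
#   make_batched(frame)               # -> [[frame]]          a single frame
#   make_batched([frame, frame])      # -> [[frame, frame]]   a single video
#   make_batched([[frame], [frame]])  # -> unchanged          already a batch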
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
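# Usage sketch (illustrative; the processor defaults come from this file, the
# input video is random data made up for the example):
if __name__ == "__main__":
    demo_processor = VivitImageProcessor()
    demo_video = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
    demo_batch = demo_processor(demo_video, return_tensors="np")
    # frames are resized to shortest_edge=256 and then center-cropped to 224x224
    print(demo_batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)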
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swap phases since after n phases we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # hardcoded to the length of the example list built in main()
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
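# Reference sketch (sequential, no processes or pipes): the same odd-even
# transposition schedule on a plain list, handy for sanity-checking the
# parallel version above.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3]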
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
print('''Successfully setup pod.''' )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)


if __name__ == "__main__":
    main()
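# Example invocation (illustrative; the TPU name, zone and commands are
# placeholders, not captured output):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --command "accelerate launch train.py"
#
# assembles and runs roughly:
#
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install -r requirements.txt; accelerate launch train.py" \
#       --worker all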
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
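# Minimal sketch of the label-padding trick above (kept as comments; the tensors
# are made up for illustration). CTC loss must ignore padded label positions, and
# the Trainer convention for "ignore this position" is the label id -100:
#
#   label_ids = torch.tensor([[5, 2, 9, 0, 0]])       # 0 = tokenizer pad id
#   attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # 0 marks padding
#   labels = label_ids.masked_fill(attention_mask.ne(1), -100)
#   # labels -> tensor([[   5,    2,    9, -100, -100]])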
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
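# Hedged alternative (assumes tweepy's Cursor API; not exercised here): the
# max_id pagination above can also be delegated to tweepy itself, e.g.
#
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items():
#       ...
#
# Either way, the v1.1 user_timeline endpoint only exposes roughly the most
# recent 3200 tweets of an account.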
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
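# A vectorized sketch of the bbox "legality" fix used in the tester above
# (illustrative; numpy only): sorting each (x0, x2) and (y1, y3) coordinate pair
# replaces the per-element swap loops.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _bbox = _rng.integers(0, 1000, size=(2, 7, 4))
    _bbox[..., [1, 3]] = np.sort(_bbox[..., [1, 3]], axis=-1)  # ensure bbox[..., 1] <= bbox[..., 3]
    _bbox[..., [0, 2]] = np.sort(_bbox[..., [0, 2]], axis=-1)  # ensure bbox[..., 0] <= bbox[..., 2]
    assert (_bbox[..., 3] >= _bbox[..., 1]).all() and (_bbox[..., 2] >= _bbox[..., 0]).all()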
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
'''simple docstring'''
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
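# Quick sanity check (illustrative): solving a tower of height n always takes
# 2**n - 1 moves, which follows from the recursion T(n) = 2 * T(n - 1) + 1.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1


assert [count_moves(n) for n in range(1, 5)] == [1, 3, 7, 15]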
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_model(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
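
# Example invocation (illustrative; the script file name and dump folder are
# hypothetical placeholders):
#
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#         --pytorch_dump_folder_path converted_donut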
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
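
# These checks are normally collected by pytest; an illustrative local run
# (the file path is an assumption) would be:
#
#     python -m pytest -sv tests/test_metrics.py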
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
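
# Standalone sketch (illustrative, not part of the test suite): the same dummy
# builder can be prepared outside unittest with Apache Beam's local DirectRunner.
# The cache path below is a placeholder.
#
#     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#     builder.download_and_prepare()
#     dset = builder.as_dataset()
#     print(dset["train"][0])  # {'content': 'foo'}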
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two minterm strings if they differ in exactly one position.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent terms; terms that never merge are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        minterm = int(minterm)
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants first, then greedily cover the remaining minterms."""
    temp = []
    select = [0] * len(chart)
    # A column with a single 1 marks an essential prime implicant.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential rows and zero out the columns they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the row covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff prime implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
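

def _demo() -> None:
    # Non-interactive sketch (not part of the original script): minimize a
    # 3-variable function with minterms {0, 1, 2, 5}. The essential prime
    # implicants are '0_0' (covers 0, 2) and '_01' (covers 1, 5).
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print(sorted(selection(chart, prime_implicants)))  # ['0_0', '_01']
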
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()

    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
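
# Example invocation (illustrative; the script file name and output folder are
# placeholders):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model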
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """
    Convert a speed between km/h, m/s, mph and knots via km/h.

    >>> convert_speed(100, "km/h", "m/s")
    27.778
    >>> convert_speed(100, "km/h", "mph")
    62.137
    >>> convert_speed(100, "km/h", "knot")
    53.996
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
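    # Quick illustrative check, mirroring the doctest expectations above.
    print(convert_speed(100, "km/h", "mph"))  # 62.137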
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
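
# Minimal usage sketch (illustrative, not part of the original module): compose a
# small config from sub-configs and round-trip it through to_dict(). The tiny
# layer/head counts are arbitrary.
#
#     text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
#     vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
#     config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.to_dict()["model_type"] == "pix2struct"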
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
import re
import subprocess
import sys
__SCREAMING_SNAKE_CASE : Optional[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
__SCREAMING_SNAKE_CASE : List[str] = """|""".join(sys.argv[1:])
__SCREAMING_SNAKE_CASE : Optional[int] = re.compile(RF"""^({joined_dirs}).*?\.py$""")
__SCREAMING_SNAKE_CASE : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 700 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
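# A hedged illustration (the script filename and exact flag spellings are assumptions;
# the `accelerate config` / `accelerate launch` CLI commands themselves are real):
#   accelerate config                                  # one-time interactive setup
#   accelerate launch ./nlp_example.py --mixed_precision fp16
#   accelerate launch ./nlp_example.py --cpu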
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
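# (recent NVIDIA GPUs run tensor-core kernels fastest when padded dimensions are multiples of 8; fp8 kernels prefer multiples of 16)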
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
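# e.g. a requested batch_size of 64 becomes gradient_accumulation_steps=2 with per-step batches of MAX_GPU_BATCH_SIZE=32, keeping the effective batch size at 64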
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
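# e.g. "images/great_pyrenees_123.jpg" -> "great_pyrenees" (the trailing _<index>.jpg is stripped)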
UpperCAmelCase__ : Optional[Any] = fname.split(os.path.sep )[-1]
return re.search(r"""^(.*)_\d+\.jpg$""" , __UpperCamelCase ).groups()[0]
class __lowercase ( __lowerCamelCase ):
def __init__( self : List[Any] ,A : List[Any] ,A : Dict=None ,A : Any=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = file_names
UpperCAmelCase__ : Optional[Any] = image_transform
UpperCAmelCase__ : str = label_to_id
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.file_names )
def __getitem__( self : List[str] ,A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.file_names[idx]
UpperCAmelCase__ : str = PIL.Image.open(A )
UpperCAmelCase__ : Any = raw_image.convert("""RGB""" )
if self.image_transform is not None:
UpperCAmelCase__ : Tuple = self.image_transform(A )
UpperCAmelCase__ : Union[str, Any] = extract_label(A )
if self.label_to_id is not None:
UpperCAmelCase__ : Dict = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if args.with_tracking:
UpperCAmelCase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
UpperCAmelCase__ : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ : List[str] = config["""lr"""]
UpperCAmelCase__ : Optional[int] = int(config["""num_epochs"""] )
UpperCAmelCase__ : str = int(config["""seed"""] )
UpperCAmelCase__ : Tuple = int(config["""batch_size"""] )
UpperCAmelCase__ : int = config["""image_size"""]
if not isinstance(__UpperCamelCase , (list, tuple) ):
UpperCAmelCase__ : Any = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase__ : Any = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase__ : Any = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase__ : Any = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase__ : int = os.path.split(__UpperCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(__UpperCamelCase , __UpperCamelCase )
# Grab all the image filenames
UpperCAmelCase__ : str = [os.path.join(args.data_dir , __UpperCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
UpperCAmelCase__ : Any = [extract_label(__UpperCamelCase ) for fname in file_names]
UpperCAmelCase__ : Any = list(set(__UpperCamelCase ) )
id_to_label.sort()
UpperCAmelCase__ : Tuple = {lbl: i for i, lbl in enumerate(__UpperCamelCase )}
# Set the seed before splitting the data.
np.random.seed(__UpperCamelCase )
torch.manual_seed(__UpperCamelCase )
torch.cuda.manual_seed_all(__UpperCamelCase )
# Split our filenames between train and validation
UpperCAmelCase__ : Optional[int] = np.random.permutation(len(__UpperCamelCase ) )
UpperCAmelCase__ : Dict = int(0.8 * len(__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = random_perm[:cut]
UpperCAmelCase__ : int = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase__ : int = Compose([RandomResizedCrop(__UpperCamelCase , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase__ : Union[str, Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase )
# For evaluation, we use a deterministic Resize
UpperCAmelCase__ : List[Any] = Compose([Resize(__UpperCamelCase ), ToTensor()] )
UpperCAmelCase__ : Dict = PetsDataset([file_names[i] for i in eval_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase )
# Instantiate dataloaders.
UpperCAmelCase__ : Union[str, Any] = DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 )
UpperCAmelCase__ : Optional[int] = DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ : Dict = create_model("""resnet50d""" , pretrained=__UpperCamelCase , num_classes=len(__UpperCamelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ : Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase__ : List[str] = False
for param in model.get_classifier().parameters():
UpperCAmelCase__ : Tuple = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase__ : Union[str, Any] = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase__ : Optional[int] = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
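# both tensors have shape [1, 3, 1, 1], so they broadcast over the [batch, channel, height, width] image batches below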
# Instantiate optimizer
UpperCAmelCase__ : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase__ : Optional[Any] = OneCycleLR(optimizer=__UpperCamelCase , max_lr=__UpperCamelCase , epochs=__UpperCamelCase , steps_per_epoch=len(__UpperCamelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase__ : Optional[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase__ : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase__ : Dict = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase__ : Union[str, Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase__ : Optional[int] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase__ : Dict = os.path.splitext(__UpperCamelCase )[0]
if "epoch" in training_difference:
UpperCAmelCase__ : Any = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
UpperCAmelCase__ : Union[str, Any] = None
else:
UpperCAmelCase__ : Tuple = int(training_difference.replace("""step_""" , """""" ) )
UpperCAmelCase__ : List[str] = resume_step // len(__UpperCamelCase )
resume_step -= starting_epoch * len(__UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
if args.with_tracking:
UpperCAmelCase__ : Optional[Any] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase__ : Dict = accelerator.skip_first_batches(__UpperCamelCase , __UpperCamelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase__ : List[Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase__ : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase__ : Union[str, Any] = (batch["""image"""] - mean) / std
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = torch.nn.functional.cross_entropy(__UpperCamelCase , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Tuple = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase__ : Dict = os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : List[Any] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase__ : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase__ : Tuple = (batch["""image"""] - mean) / std
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = outputs.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
UpperCAmelCase__ : List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase__ : str = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(__UpperCamelCase ),
"""epoch""": epoch,
} , step=__UpperCamelCase , )
if checkpointing_steps == "epoch":
UpperCAmelCase__ : Optional[int] = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase__ : Union[str, Any] = os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=__UpperCamelCase , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=__UpperCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
UpperCAmelCase__ : Any = parser.parse_args()
UpperCAmelCase__ : Optional[Any] = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 194 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__UpperCAmelCase = 'examples/'
__UpperCAmelCase = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__UpperCAmelCase = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
__UpperCAmelCase = 'README.md'
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase__ : Dict = f.read()
UpperCAmelCase__ , UpperCAmelCase__ : Any = REPLACE_PATTERNS[pattern]
UpperCAmelCase__ : List[str] = replace.replace("""VERSION""" , __UpperCamelCase )
UpperCAmelCase__ : Tuple = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """🤗 Transformers currently provides the following architectures"""
UpperCAmelCase__ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase__ : str = f.readlines()
# Find the start of the list.
UpperCAmelCase__ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase__ : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
UpperCAmelCase__ : List[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
UpperCAmelCase__ : Optional[Any] = f.read()
UpperCAmelCase__ : int = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
UpperCAmelCase__ : List[str] = default_version.base_version
elif patch:
UpperCAmelCase__ : Any = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase__ : Optional[Any] = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase__ : Optional[int] = input(F"Which version are you releasing? [{default_version}]" )
if len(__UpperCamelCase ) == 0:
UpperCAmelCase__ : Union[str, Any] = default_version
print(F"Updating version to {version}." )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_version()
UpperCAmelCase__ : Any = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase__ : List[Any] = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase__ : Dict = input(F"Which version are we developing now? [{dev_version}]" )
if len(__UpperCamelCase ) == 0:
UpperCAmelCase__ : List[Any] = dev_version
print(F"Updating version to {version}." )
global_version_update(__UpperCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 194 | 1 |
from heapq import heappop, heappush
import numpy as np
def a ( A__ , A__ , A__ , A__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = grid.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [-1, 1, 0, 0]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = [(0, source)], set()
SCREAMING_SNAKE_CASE__ : Optional[int] = np.full((rows, cols) , np.inf )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Any = np.empty((rows, cols) , dtype=A__ )
SCREAMING_SNAKE_CASE__ : List[str] = None
while queue:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Any = heappop(A__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
SCREAMING_SNAKE_CASE__ : Any = []
while (x, y) != source:
path.append((x, y) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = predecessors[x, y]
path.append(A__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(A__ ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(A__ , (dist + 1, (nx, ny)) )
SCREAMING_SNAKE_CASE__ : List[Any] = dist + 1
SCREAMING_SNAKE_CASE__ : List[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
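# A hedged usage sketch (argument order follows the signature of `a` above:
# grid, source, destination, allow_diagonal; cells equal to 1 are walkable):
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#   dist, path = a(grid, (0, 0), (2, 2), False)
#   # dist is the number of unit-cost steps; path lists the cells from source to destination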
| 35 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_lowerCAmelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ,_A=None ,_A=1 ):
'''simple docstring'''
_lowerCAmelCase : Any = tokenizer
_lowerCAmelCase : str = dataset
_lowerCAmelCase : List[Any] = len(_A ) if n_tasks is None else n_tasks
_lowerCAmelCase : Union[str, Any] = n_copies
def __iter__( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
_lowerCAmelCase : List[Any] = self.tokenizer(_A ,padding=_A ,return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = start_length
_lowerCAmelCase : str = eof_strings
_lowerCAmelCase : int = tokenizer
def __call__( self ,_A ,_A ,**_A ):
'''simple docstring'''
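# generation is considered finished only once every sequence in the batch has produced at least one of the EOF strings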
_lowerCAmelCase : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_lowerCAmelCase : str = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_A )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
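# split on any EOF marker while keeping the markers, then drop the final marker and whatever was generated after it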
_lowerCAmelCase : Dict = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
_lowerCAmelCase : Any = batch['ids'].shape[-1]
_lowerCAmelCase : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
_lowerCAmelCase : List[Any] = batch['task_id'].repeat(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
_lowerCAmelCase : Union[str, Any] = generated_tokens.cpu().numpy()
_lowerCAmelCase : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_lowerCAmelCase : Union[str, Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_lowerCAmelCase : Optional[int] = 'false'
if args.num_workers is None:
_lowerCAmelCase : str = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_lowerCAmelCase : List[str] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
_lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
_lowerCAmelCase : List[str] = tokenizer.eos_token
_lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_lowerCAmelCase : int = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
_lowerCAmelCase : List[Any] = load_dataset('openai_humaneval' )
_lowerCAmelCase : int = load_metric('code_eval' )
_lowerCAmelCase : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
_lowerCAmelCase : int = args.n_samples // args.batch_size
_lowerCAmelCase : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
_lowerCAmelCase : Any = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_lowerCAmelCase : Dict = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use the `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
_lowerCAmelCase, _lowerCAmelCase : List[Any] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
_lowerCAmelCase : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = human_eval['test'][task]['test']
_lowerCAmelCase : Union[str, Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
_lowerCAmelCase, _lowerCAmelCase : List[str] = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 259 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class a ( __UpperCAmelCase ):
def __init__( self : List[Any] , *snake_case__ : Dict , **snake_case__ : Optional[Any] ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , "vision" )
self.check_model_type(snake_case__ )
def __call__( self : int , snake_case__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **snake_case__ : int ):
"""simple docstring"""
return super().__call__(snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self : Dict , **snake_case__ : Optional[int] ):
"""simple docstring"""
return {}, {}, {}
def UpperCAmelCase__ ( self : List[str] , snake_case__ : List[Any] ):
"""simple docstring"""
__lowerCAmelCase = load_image(snake_case__ )
__lowerCAmelCase = image.size
__lowerCAmelCase = self.image_processor(images=snake_case__ , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : int ):
"""simple docstring"""
__lowerCAmelCase = self.model(**snake_case__ )
return model_outputs
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = model_outputs.predicted_depth
__lowerCAmelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=snake_case__ )
__lowerCAmelCase = prediction.squeeze().cpu().numpy()
__lowerCAmelCase = (output * 255 / np.max(snake_case__ )).astype("uint8" )
__lowerCAmelCase = Image.fromarray(snake_case__ )
__lowerCAmelCase = {}
__lowerCAmelCase = predicted_depth
__lowerCAmelCase = depth
return output_dict
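# A hedged usage sketch (the model id is illustrative, not prescribed by this file):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   outputs["depth"].save("depth.png")  # PIL image; outputs["predicted_depth"] is the raw tensor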
| 702 |
import fire
from utils import calculate_rouge, save_json
def _UpperCAmelCase ( UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any]=None , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
__lowerCAmelCase = [x.strip() for x in open(UpperCamelCase ).readlines()]
__lowerCAmelCase = [x.strip() for x in open(UpperCamelCase ).readlines()][: len(UpperCamelCase )]
__lowerCAmelCase = calculate_rouge(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
if save_path is not None:
save_json(UpperCamelCase , UpperCamelCase , indent=UpperCamelCase )
return metrics # these print nicely
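# fire (below) turns the function above into a CLI; a hedged invocation (file names are illustrative):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json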
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 376 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case : Any = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = '''align_text_model'''
def __init__( self : int , __a : Optional[int]=30522 , __a : int=768 , __a : Optional[Any]=12 , __a : Any=12 , __a : Tuple=3072 , __a : Tuple="gelu" , __a : List[Any]=0.1 , __a : Optional[int]=0.1 , __a : Dict=512 , __a : List[Any]=2 , __a : Dict=0.02 , __a : Optional[int]=1E-12 , __a : int=0 , __a : Optional[int]="absolute" , __a : Tuple=True , **__a : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Tuple = vocab_size
__lowercase : Dict = hidden_size
__lowercase : Tuple = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : Optional[Any] = hidden_act
__lowercase : Tuple = intermediate_size
__lowercase : List[str] = hidden_dropout_prob
__lowercase : List[str] = attention_probs_dropout_prob
__lowercase : Any = max_position_embeddings
__lowercase : str = type_vocab_size
__lowercase : List[str] = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : Optional[int] = position_embedding_type
__lowercase : Union[str, Any] = use_cache
__lowercase : int = pad_token_id
@classmethod
def lowerCAmelCase ( cls : Tuple , __a : Union[str, os.PathLike] , **__a : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__a )
__lowercase , __lowercase : List[Any] = cls.get_config_dict(__a , **__a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
__lowercase : Tuple = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''align_vision_model'''
def __init__( self : List[str] , __a : int = 3 , __a : int = 600 , __a : float = 2.0 , __a : float = 3.1 , __a : int = 8 , __a : List[int] = [3, 3, 5, 3, 5, 5, 3] , __a : List[int] = [32, 16, 24, 40, 80, 112, 192] , __a : List[int] = [16, 24, 40, 80, 112, 192, 320] , __a : List[int] = [] , __a : List[int] = [1, 2, 2, 2, 1, 2, 1] , __a : List[int] = [1, 2, 2, 3, 3, 4, 1] , __a : List[int] = [1, 6, 6, 6, 6, 6, 6] , __a : float = 0.25 , __a : str = "swish" , __a : int = 2560 , __a : str = "mean" , __a : float = 0.02 , __a : float = 0.001 , __a : float = 0.99 , __a : float = 0.2 , **__a : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Any = num_channels
__lowercase : Tuple = image_size
__lowercase : Tuple = width_coefficient
__lowercase : Any = depth_coefficient
__lowercase : str = depth_divisor
__lowercase : Union[str, Any] = kernel_sizes
__lowercase : int = in_channels
__lowercase : List[Any] = out_channels
__lowercase : int = depthwise_padding
__lowercase : Union[str, Any] = strides
__lowercase : Optional[int] = num_block_repeats
__lowercase : List[str] = expand_ratios
__lowercase : int = squeeze_expansion_ratio
__lowercase : str = hidden_act
__lowercase : List[str] = hidden_dim
__lowercase : Dict = pooling_type
__lowercase : Any = initializer_range
__lowercase : Tuple = batch_norm_eps
__lowercase : int = batch_norm_momentum
__lowercase : Tuple = drop_connect_rate
__lowercase : Tuple = sum(__a ) * 4
@classmethod
def lowerCAmelCase ( cls : str , __a : Union[str, os.PathLike] , **__a : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__a )
__lowercase , __lowercase : Optional[int] = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
__lowercase : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Union[str, Any] = '''align'''
_A : Optional[int] = True
def __init__( self : Optional[Any] , __a : Optional[int]=None , __a : str=None , __a : int=640 , __a : List[Any]=1.0 , __a : Optional[int]=0.02 , **__a : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**__a )
if text_config is None:
__lowercase : Optional[Any] = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
__lowercase : Dict = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
__lowercase : str = AlignTextConfig(**__a )
__lowercase : int = AlignVisionConfig(**__a )
__lowercase : str = projection_dim
__lowercase : Optional[int] = temperature_init_value
__lowercase : Dict = initializer_range
@classmethod
def lowerCAmelCase ( cls : List[Any] , __a : AlignTextConfig , __a : AlignVisionConfig , **__a : Any ) -> Any:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = copy.deepcopy(self.__dict__ )
__lowercase : Tuple = self.text_config.to_dict()
__lowercase : List[Any] = self.vision_config.to_dict()
__lowercase : List[str] = self.__class__.model_type
return output | 149 | 0 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __snake_case ( UpperCamelCase__ ) -> Any:
"""simple docstring"""
if not is_accelerate_available():
return method
A = version.parse(accelerate.__version__ ).base_version
if version.parse(snake_case_ ) < version.parse('0.17.0' ):
return method
def wrapper(self , *UpperCamelCase__ , **UpperCamelCase__ ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *snake_case_ , **snake_case_ )
return wrapper
| 718 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = TransfoXLTokenizer
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : Union[str, Any] ):
super().setUp()
A = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self : int , **_lowercase : List[str] ):
A = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : int , _lowercase : List[Any] ):
A = '<unk> UNwanted , running'
A = '<unk> unwanted, running'
return input_text, output_text
def __a ( self : List[str] ):
A = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_lowercase )
A = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(_lowercase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [0, 4, 8, 7] )
def __a ( self : int ):
A = TransfoXLTokenizer(lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def __a ( self : Optional[Any] ):
A = TransfoXLTokenizer(lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __a ( self : str ):
A = TransfoXLTokenizer(lower_case=_lowercase )
A = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
A = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(_lowercase ) , _lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(_lowercase ) , _lowercase )
def __a ( self : Dict ):
A = self.get_tokenizer()
A = len(_lowercase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_lowercase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 91 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_UpperCAmelCase : Dict = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_UpperCAmelCase : str = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_UpperCAmelCase : str = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ), reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'], )
def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict=None, UpperCamelCase__ : Dict=1, UpperCamelCase__ : List[Any]="binary", UpperCamelCase__ : str=None ) -> Optional[int]:
_A = fa_score(
UpperCamelCase__, UpperCamelCase__, labels=UpperCamelCase__, pos_label=UpperCamelCase__, average=UpperCamelCase__, sample_weight=UpperCamelCase__ )
return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
| 107 |
"""simple docstring"""
def lowercase__(number ) ->bool:
"""Return True if `number` is even: its lowest bit is 0."""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 | 0 |
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ) -> None:
SCREAMING_SNAKE_CASE__ = [2, 1, 2, -1]
SCREAMING_SNAKE_CASE__ = [1, 2, 3, 4]
def lowercase_ ( self : List[str] ) -> list[float]:
SCREAMING_SNAKE_CASE__ = len(self.first_signal )
SCREAMING_SNAKE_CASE__ = len(self.second_signal )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , __lowerCamelCase )
# create a zero matrix of max_length x max_length
SCREAMING_SNAKE_CASE__ = [[0] * max_length for i in range(__lowerCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = deque(self.second_signal )
rotated_signal.rotate(__lowerCamelCase )
for j, item in enumerate(__lowerCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
SCREAMING_SNAKE_CASE__ = np.matmul(np.transpose(__lowerCamelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__lowerCamelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
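# A hedged usage sketch (class and method names follow the definitions above):
#   conv = UpperCAmelCase__()
#   print(conv.lowercase_())  # circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4]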
| 472 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
a = Features({"question": Value("string" ), "context": Value("string" )} )
a = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
a = "question"
a = "context"
a = "answers"
@property
def lowercase_ ( self : Dict ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 472 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase_ = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
# Functions to print a diamond of stars: an upper pyramid followed by its mirror image.


def floyd(n):
    """Print the upper half of the diamond: n rows of right-aligned, growing star counts."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: n rows of shrinking star counts."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive n."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(r"""| /\ | |- |  |-  |--| |\  /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 111 | 0 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel with standard deviation sigma."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """Convolve a grayscale image with a Gaussian kernel (valid padding, via im2col)."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
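# Cross-check (added, hedged): SciPy's Gaussian filter can serve as a reference, though the
# results will not match exactly -- the hand-rolled version above uses "valid" padding (the
# output shrinks by k_size - 1 per axis) and a fixed kernel size, while SciPy derives the
# kernel radius from sigma and pads the borders.
#
#   from scipy.ndimage import gaussian_filter as scipy_gaussian
#   reference = scipy_gaussian(gray.astype(float), sigma=1)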
| 131 |
import math
import random
def sigmoid_function(value, deriv=False):
    """Return sigmoid(value), or its derivative when `value` is already a sigmoid output.

    >>> sigmoid_function(0)
    0.5
    >>> sigmoid_function(0.5, True)
    0.25
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations):
    """Train a single weight so the network output approaches expected / 100."""
    # Random initial weight (an odd integer in [1, 199])
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
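# Note (added): this is a one-weight, one-neuron "network": each step nudges
# sigmoid(INITIAL_VALUE * weight) toward expected / 100, so the printed value drifts toward
# `expected` as the number of propagations grows. A rough, hedged sanity check:
#
#   result = forward_propagation(32, 450_000)
#   assert 31 < result < 33  # usually holds once training has converged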
| 131 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Compute the latent height/width for a requested image size and VQ scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
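# Numeric example (added): with the default scale_factor of 8, a 768x768 request maps to a
# 96x96 latent grid (768 // 64 = 12, then 12 * 8 = 96), and a non-multiple such as 700
# rounds up: 700 // 64 = 10 remainder 60, so (10 + 1) * 8 = 88 latent pixels.
#
#   assert downscale_height_and_width(768, 768) == (96, 96)
#   assert downscale_height_and_width(700, 700) == (88, 88)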
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
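# Note (added): the guidance step above is standard classifier-free guidance -- the unet
# runs once on a doubled batch (unconditional + conditional embeddings), and the two noise
# estimates are recombined as
#
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#
# so guidance_scale = 1.0 reduces to the conditional prediction and larger values
# extrapolate away from the unconditional one. The variance channels predicted by the unet
# are split off first and are not guided.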
| 315 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference indices (one JSON list per line) to a dataset."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
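# Format note (added, hedged): each line of the ref file is a JSON list of sub-token
# positions that *continue* a word; DataCollatorForWholeWordMask uses these to mask whole
# words at once. For a sentence tokenized as ["我", "喜", "欢", "北", "京"], a line like
# [2, 4] would mark tokens 2 and 4 as word continuations. The exact indices depend on the
# upstream Chinese word segmenter (e.g. LTP), so this example is illustrative only.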
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
return results
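# Note (added): perplexity is recovered from the average eval cross-entropy as
# exp(eval_loss); e.g. an eval_loss of 2.0 nats corresponds to a perplexity of about 7.39,
# meaning the model is on average as uncertain as a uniform choice over ~7.4 tokens.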
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 315 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NB: the "memorry" spelling of this kwargs key is kept from the original source
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 43 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
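# BPE walk-through (added): with the merge table above, "lower" is first split into the
# characters ("l", "o", "w", "e", "r</w>"); the ranked merges then apply in order --
# "l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>" -- leaving ["low", "er</w>"],
# whose vocabulary ids are 14 and 15 (with "<unk>" at 20), which is exactly what
# test_full_tokenizer asserts.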
| 43 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
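# Example (added): a raw checkpoint key such as "layers.0.blocks.0.modulation.f.weight"
# passes through the rules above as
#   layers.0...            -> encoder.layers.0...   (prefix rule)
#   encoder.layers         -> encoder.stages
#   blocks                 -> layers
#   modulation.f           -> modulation.projection_in
#   (contains "head"? no)  -> "focalnet." prefix
# yielding "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight".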
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
# fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 172 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (adapted from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary, accumulating its count."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # no metadata header in plain fairseq dictionaries
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 172 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch tensors."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
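# Numeric example (added): for a 224x224 input through a 3x3 kernel with stride 2,
# in_height % stride == 0, so pad_along_height = max(3 - 2, 0) = 1, split as
# pad_top = 0, pad_bottom = 1. TF's "SAME" convention puts the extra pixel on the
# bottom/right, which PyTorch's symmetric `padding=` argument cannot reproduce -- hence
# the explicit functional pad above.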
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features

class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution ...
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # ... followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )

@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
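

# Hedged usage sketch (checkpoint name and input image are illustrative, not
# verified here):
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#     image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = image_processor(images=image, return_tensors="pt")  # `image` is any PIL image
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_class = model.config.id2label[logits.argmax(-1).item()]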
| 715 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}

class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
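

# Hedged usage sketch: the defaults above encode the released
# facebook/xlm-roberta-xl architecture, so instantiating with no arguments
# yields a 36-layer, 2560-hidden configuration.
#
#     config = XLMRobertaXLConfig()
#     assert config.num_hidden_layers == 36 and config.hidden_size == 2560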
| 107 | 0 |
"""
Remove duplicate initializers from an ONNX model to reduce its size on disk.
"""
import os
import numpy
import onnx

def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # "If" and "Loop" nodes carry subgraphs that may also reference the input
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # drop the duplicate initializer and rewire all references to the kept one
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model to reduce its size."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # element sizes by ONNX data_type: 1=FLOAT, 6=INT32 (4 bytes); 7=INT64, 11=DOUBLE (8 bytes)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
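

# Hedged usage sketch (the .onnx path is a placeholder):
#
#     optimized_path = remove_dup_initializers("/path/to/model.onnx")
#     # Writes /path/to/optimized_model.onnx in which byte-identical initializers
#     # are stored once, and every node input that referenced a removed duplicate
#     # is rewired to the surviving copy.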
| 432 |
"""
Audio/Text processor class for CLAP
"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding

class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
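

# Hedged usage sketch (checkpoint name and sampling rate are assumptions):
#
#     import numpy as np
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     audio = np.random.randn(48_000).astype(np.float32)  # ~1 s of placeholder audio
#     inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
#     # `inputs` carries input_ids/attention_mask from the tokenizer plus
#     # input_features from the feature extractor, ready for a CLAP model.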
| 432 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 501 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
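
# Cost note (follows from the recursion above): every call draws one triangle
# and spawns three children until depth 0, so a run at depth d draws
# sum(3**k for k in range(d + 1)) == (3 ** (d + 1) - 1) // 2 triangles,
# e.g. 121 triangles at depth 4.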
| 501 | 1 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
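

# Hedged illustration of the alignment helper outside the test class (a small
# standalone check added here, assuming the imports at the top of this file):
def _demo_alignment() -> None:
    out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], ["a", "b", "c"])
    # negative indices are preserved as given rather than normalized
    assert out_features == ["a", "c"] and out_indices == [-3, -1]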
| 104 |
"""Convert Wav2Vec2-Conformer checkpoints from fairseq to Hugging Face Transformers."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase : Optional[Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( A ,A ,A ,A ,A ) -> Tuple:
for attribute in key.split("." ):
lowercase : Dict = getattr(A ,A )
if weight_type is not None:
lowercase : List[str] = getattr(A ,A ).shape
else:
lowercase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase : List[Any] = value
elif weight_type == "weight_g":
lowercase : List[Any] = value
elif weight_type == "weight_v":
lowercase : int = value
elif weight_type == "bias":
lowercase : Any = value
elif weight_type == "running_mean":
lowercase : Tuple = value
elif weight_type == "running_var":
lowercase : Dict = value
elif weight_type == "num_batches_tracked":
lowercase : Optional[int] = value
elif weight_type == "inv_freq":
lowercase : List[Any] = value
else:
lowercase : Dict = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( A ,A ,A ) -> int:
lowercase : Optional[int] = []
lowercase : Tuple = fairseq_model.state_dict()
lowercase : Optional[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase : str = False
if "conv_layers" in name:
load_conv_layer(
A ,A ,A ,A ,hf_model.config.feat_extract_norm == "group" ,)
lowercase : Dict = True
else:
for key, mapped_key in MAPPING.items():
lowercase : List[str] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase : List[str] = True
if "*" in mapped_key:
lowercase : int = name.split(A )[0].split("." )[-2]
lowercase : Optional[Any] = mapped_key.replace("*" ,A )
if "pos_bias_u" in name:
lowercase : str = None
elif "pos_bias_v" in name:
lowercase : Optional[int] = None
elif "weight_g" in name:
lowercase : int = "weight_g"
elif "weight_v" in name:
lowercase : Dict = "weight_v"
elif "bias" in name:
lowercase : List[str] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[int] = "weight"
elif "running_mean" in name:
lowercase : Optional[int] = "running_mean"
elif "inv_freq" in name:
lowercase : Dict = "inv_freq"
elif "running_var" in name:
lowercase : int = "running_var"
elif "num_batches_tracked" in name:
lowercase : Optional[Any] = "num_batches_tracked"
else:
lowercase : int = None
set_recursively(A ,A ,A ,A ,A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _A ( A ,A ,A ,A ,A ) -> List[str]:
lowercase : Tuple = full_name.split("conv_layers." )[-1]
lowercase : Optional[Any] = name.split("." )
lowercase : str = int(items[0] )
lowercase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def _A ( A ,A ,A=None ,A=None ,A=True ) -> Optional[Any]:
if config_path is not None:
lowercase : Tuple = WavaVecaConformerConfig.from_pretrained(A ,hidden_act="swish" )
else:
lowercase : Any = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase : str = "rotary"
if is_finetuned:
if dict_path:
lowercase : List[str] = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase : Optional[int] = target_dict.pad_index
lowercase : Optional[int] = target_dict.bos_index
lowercase : Optional[Any] = target_dict.eos_index
lowercase : str = len(target_dict.symbols )
lowercase : List[Any] = os.path.join(A ,"vocab.json" )
if not os.path.isdir(A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A ) )
return
os.makedirs(A ,exist_ok=A )
lowercase : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : Any = 0
lowercase : Tuple = 1
with open(A ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(A ,A )
lowercase : Tuple = WavaVecaCTCTokenizer(
A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=A ,)
lowercase : Dict = True if config.feat_extract_norm == "layer" else False
lowercase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6_0_0_0 ,padding_value=0 ,do_normalize=A ,return_attention_mask=A ,)
lowercase : List[str] = WavaVecaProcessor(feature_extractor=A ,tokenizer=A )
processor.save_pretrained(A )
lowercase : Any = WavaVecaConformerForCTC(A )
else:
lowercase : str = WavaVecaConformerForPreTraining(A )
if is_finetuned:
lowercase , lowercase , lowercase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase : List[str] = argparse.Namespace(task="audio_pretraining" )
lowercase : Union[str, Any] = fairseq.tasks.setup_task(A )
lowercase , lowercase , lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=A )
lowercase : List[str] = model[0].eval()
recursively_load_weights(A ,A ,not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCAmelCase : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
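
# Hedged invocation sketch (script name and paths are placeholders; the flags
# are the ones registered by the argparse block above):
#
#     python convert_wav2vec2_conformer_checkpoint.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --pytorch_dump_folder_path /path/to/output_dir \
#         --config_path /path/to/config.json \
#         --not_finetuned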
| 372 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex | float, complex | float]:
    """Solve a*x^2 + b*x + c = 0 over the complex numbers."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
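
# Hedged check of the formula above (x**2 - 3x + 2 factors as (x - 1)(x - 2)):
#
#     r1, r2 = quadratic_roots(a=1, b=-3, c=2)
#     assert {r1, r2} == {1.0, 2.0}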
| 390 | __version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
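
# Hedged note on the import pattern above: each try/except
# OptionalDependencyNotAvailable block swaps the real objects for dummy
# placeholders when an optional extra is missing, so `import diffusers` never
# hard-fails at import time. Illustrative downstream usage (checkpoint name is
# a placeholder):
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained("some-org/some-checkpoint")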
| 390 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # emit fixed-length, non-overlapping windows; drop the short remainder
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
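
# Note on the metric above: perplexity is exp(mean token-level cross-entropy),
# which is why evaluate() guards the exponentiation — a very large eval loss
# would otherwise overflow. Quick illustration (pure arithmetic, not model
# output):
#
#     import math
#     assert math.isclose(math.exp(2.0), 7.389, rel_tol=1e-3)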
| 637 |
"""
Rescale numeric data with min-max normalization or z-score standardization.
"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # rescale every value onto [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # rescale to zero mean and unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
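

# Quick demonstration of both rescalings (hand-checkable values):
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0]
    assert normalization(data) == [0.0, 0.5, 1.0]  # min-max maps onto [0, 1]
    assert standardization(data) == [-1.0, 0.0, 1.0]  # zero mean, unit sample stdev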
| 71 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = "pix2struct_text_model"
_lowerCAmelCase : List[Any] = ["past_key_values"]
_lowerCAmelCase : Optional[int] = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , UpperCAmelCase__=5_0244 , UpperCAmelCase__=768 , UpperCAmelCase__=64 , UpperCAmelCase__=2048 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=32 , UpperCAmelCase__=128 , UpperCAmelCase__=0.1 , UpperCAmelCase__=1e-6 , UpperCAmelCase__=1.0 , UpperCAmelCase__="gelu_new" , UpperCAmelCase__=0 , UpperCAmelCase__=False , UpperCAmelCase__=0 , UpperCAmelCase__=1 , UpperCAmelCase__=False , UpperCAmelCase__=True , **UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = d_kv
SCREAMING_SNAKE_CASE__ = d_ff
SCREAMING_SNAKE_CASE__ = num_layers
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ = dropout_rate
SCREAMING_SNAKE_CASE__ = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = eos_token_id
SCREAMING_SNAKE_CASE__ = decoder_start_token_id
# for backwards compatibility
SCREAMING_SNAKE_CASE__ = dense_act_fn
super().__init__(
pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , tie_word_embeddings=UpperCAmelCase__ , is_decoder=UpperCAmelCase__ , **UpperCAmelCase__ , )
@classmethod
def lowerCAmelCase__ ( cls , UpperCAmelCase__ , **UpperCAmelCase__ ):
cls._set_token_in_kwargs(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : int = "pix2struct_vision_model"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=768 , UpperCAmelCase__=2048 , UpperCAmelCase__=64 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__="gelu_new" , UpperCAmelCase__=1e-6 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=1e-10 , UpperCAmelCase__=1.0 , UpperCAmelCase__=4096 , UpperCAmelCase__=32 , UpperCAmelCase__=128 , **UpperCAmelCase__ , ):
super().__init__(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = patch_embed_hidden_size
SCREAMING_SNAKE_CASE__ = d_ff
SCREAMING_SNAKE_CASE__ = dropout_rate
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = dense_act_fn
SCREAMING_SNAKE_CASE__ = seq_len
SCREAMING_SNAKE_CASE__ = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ = d_kv
@classmethod
def lowerCAmelCase__ ( cls , UpperCAmelCase__ , **UpperCAmelCase__ ):
cls._set_token_in_kwargs(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = "pix2struct"
_lowerCAmelCase : List[Any] = True
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=1.0 , UpperCAmelCase__=0.02 , UpperCAmelCase__=False , UpperCAmelCase__=False , UpperCAmelCase__=True , **UpperCAmelCase__ , ):
super().__init__(tie_word_embeddings=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
if text_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
SCREAMING_SNAKE_CASE__ = PixaStructTextConfig(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = PixaStructVisionConfig(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = self.text_config.decoder_start_token_id
SCREAMING_SNAKE_CASE__ = self.text_config.pad_token_id
SCREAMING_SNAKE_CASE__ = self.text_config.eos_token_id
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = self.initializer_range
SCREAMING_SNAKE_CASE__ = self.initializer_range
SCREAMING_SNAKE_CASE__ = is_vqa
@classmethod
def lowerCAmelCase__ ( cls , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output
| 701 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
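
# Hand check of sum_of_divisors on the classic amicable pair: the proper
# divisors of 220 sum to 284 and vice versa, so both numbers are counted by
# solution().
#
#     assert sum_of_divisors(220) == 284
#     assert sum_of_divisors(284) == 220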
| 112 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict=7, UpperCamelCase__ : str=3, UpperCamelCase__ : List[Any]=30, UpperCamelCase__ : List[str]=4_00, UpperCamelCase__ : str=True, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Union[str, Any]=0.9, UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : Union[str, Any]=True, UpperCamelCase__ : Any=[0.5, 0.5, 0.5], UpperCamelCase__ : List[str]=[0.5, 0.5, 0.5], ) -> Tuple:
_A = size if size is not None else {'shortest_edge': 30}
_A = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize_and_center_crop
_A = size
_A = crop_pct
_A = crop_size
_A = do_normalize
_A = image_mean
_A = image_std
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
_A = PoolFormerImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Dict ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'size' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'crop_pct' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'image_std' ) )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30} )
_A = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
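
# --- Illustrative usage (appendix, not part of the original test file) ---
# A minimal sketch of running the PoolFormer image processor outside the test
# harness. It assumes `transformers` with vision extras and `PIL` are
# installed; the input image size is arbitrary.
if __name__ == "__main__":
    if is_vision_available():
        from PIL import Image as PILImage

        processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
        dummy = PILImage.new("RGB", (64, 48))
        pixel_values = processor(dummy, return_tensors="pt").pixel_values
        # One image, three channels, center-cropped to 30x30
        print(pixel_values.shape)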
| 107 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 32 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 707 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict;
        # the target key names follow the HF ViT layout
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
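
# --- Illustrative usage (appendix, not part of the original script) ---
# The script is normally invoked from the command line, roughly:
#     python <this_script>.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16 --base_model
# Calling the conversion function directly is equivalent; it needs network
# access to torch.hub and the HF hub, and the output path below is an example:
#
#     convert_vit_checkpoint("dino_vitb16", "./dino_vitb16", base_model=True)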
| 303 | 0 |
def is_even(number: int) -> bool:
    """
    Return True when ``number`` is even, using a bitwise AND with 1.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
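
# --- Illustrative usage (appendix, not in the original) ---
# The bitwise test also works for negative integers, because Python's `&`
# behaves as if integers had an infinite two's-complement representation,
# so the lowest bit is always the parity bit.
if __name__ == "__main__":
    print([n for n in range(-4, 5) if is_even(n)])  # [-4, -2, 0, 2, 4]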
| 59 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier trained by maximising Wolfe's dual with scipy."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0, ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #     constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #     constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
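
# --- Illustrative usage (appendix, not in the original) ---
# A tiny linearly separable problem; labels must be in {-1, +1}. The exact
# predictions depend on the scipy optimiser converging, so treat this as a
# sketch rather than a guaranteed result.
if __name__ == "__main__":
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear", regularization=10.0)
    svc.fit(xs, ys)
    # Points on the x=0 side should classify as +1, the x=1 side as -1
    print(svc.predict(np.asarray([0.0, 1.5])), svc.predict(np.asarray([1.0, 1.5])))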
| 59 | 1 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
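
# --- Illustrative usage (appendix, not in the original) ---
# Each output line is padded to exactly max_width characters.
if __name__ == "__main__":
    for row in text_justification("This is an example of text justification.", 16):
        print(repr(row))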
| 710 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file", default=None, help="The accelerate config file to use for the default values in the launching script.", )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
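
# --- Illustrative usage (appendix, not in the original) ---
# `transformers-cli env` normally drives this command; instantiating it
# directly is a reasonable smoke test (the accelerate config path is optional).
if __name__ == "__main__":
    EnvironmentCommand(accelerate_config_file=None).run()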
| 530 | 0 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize: int = 1024, imageDimSize: int = 768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, weighted by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
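
# --- Illustrative usage (appendix, not in the original) ---
# A sketch of encoding text with the multilingual CLIP text tower. The
# checkpoint id below is an assumption; any XLM-R based M-CLIP weights with a
# matching config should work, and network access is required.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    model_name = "M-CLIP/XLM-Roberta-Large-Vit-B-32"  # assumed checkpoint id
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = MultilingualCLIP.from_pretrained(model_name)
    batch = tokenizer(["a photo of a cat", "ein Foto einer Katze"], padding=True, return_tensors="pt")
    projected, token_embeddings = model(batch["input_ids"], batch["attention_mask"])
    print(projected.shape)  # (batch, numDims)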
| 281 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformeraDModel(nn.Module):
    def __init__(self, num_attention_heads=16, attention_head_dim=88, in_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, num_vector_embeds=None, activation_fn="geglu", num_embeds_ada_norm=None, ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ])

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict=True, ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
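
# --- Illustrative usage (appendix, not in the original) ---
# Shape-level sketch: the module expects the two conditioning streams to be
# concatenated along the token axis (77 text tokens + 257 image tokens by
# default). Channel sizes below are illustrative, and `cross_attention_dim`
# must match the condition feature size:
#
#     import torch
#     model = DualTransformeraDModel(
#         num_attention_heads=8, attention_head_dim=8, in_channels=64, cross_attention_dim=768)
#     sample = torch.randn(1, 64, 16, 16)
#     conditions = torch.randn(1, 77 + 257, 768)
#     out = model(sample, conditions, timestep=None).sample  # same shape as `sample`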
| 681 | 0 |
from ..utils import DummyObject, requires_backends

# `_a` is how the (obfuscated) metaclass is referenced throughout this file;
# alias it to DummyObject so every dummy class below resolves. In the upstream
# dummy-objects files the backend list attribute is normally named `_backends`.
_a = DummyObject


class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =['''sentencepiece''']
def __init__( self :int, *snake_case :Union[str, Any], **snake_case :List[str]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Tuple =['''sentencepiece''']
def __init__( self :Union[str, Any], *snake_case :Tuple, **snake_case :Optional[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] =['''sentencepiece''']
def __init__( self :Union[str, Any], *snake_case :str, **snake_case :Dict):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : List[str] =['''sentencepiece''']
def __init__( self :Optional[Any], *snake_case :Any, **snake_case :Optional[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =['''sentencepiece''']
def __init__( self :int, *snake_case :Any, **snake_case :List[str]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : str =['''sentencepiece''']
def __init__( self :int, *snake_case :Tuple, **snake_case :Tuple):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =['''sentencepiece''']
def __init__( self :Optional[Any], *snake_case :int, **snake_case :Optional[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] =['''sentencepiece''']
def __init__( self :Union[str, Any], *snake_case :Optional[int], **snake_case :List[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] =['''sentencepiece''']
def __init__( self :Union[str, Any], *snake_case :List[Any], **snake_case :Dict):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =['''sentencepiece''']
def __init__( self :int, *snake_case :Union[str, Any], **snake_case :List[str]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : List[Any] =['''sentencepiece''']
def __init__( self :Dict, *snake_case :List[Any], **snake_case :Dict):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : str =['''sentencepiece''']
def __init__( self :List[str], *snake_case :List[Any], **snake_case :int):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Dict =['''sentencepiece''']
def __init__( self :Union[str, Any], *snake_case :int, **snake_case :Any):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =['''sentencepiece''']
def __init__( self :List[str], *snake_case :str, **snake_case :List[str]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =['''sentencepiece''']
def __init__( self :List[Any], *snake_case :Dict, **snake_case :Optional[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Any =['''sentencepiece''']
def __init__( self :Tuple, *snake_case :Optional[int], **snake_case :Optional[int]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : str =['''sentencepiece''']
def __init__( self :Optional[int], *snake_case :List[str], **snake_case :List[str]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : str =['''sentencepiece''']
def __init__( self :Optional[Any], *snake_case :List[str], **snake_case :Optional[int]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Any =['''sentencepiece''']
def __init__( self :Optional[Any], *snake_case :Tuple, **snake_case :Union[str, Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =['''sentencepiece''']
def __init__( self :Optional[int], *snake_case :Any, **snake_case :Dict):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : int =['''sentencepiece''']
def __init__( self :Optional[int], *snake_case :List[Any], **snake_case :str):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : List[str] =['''sentencepiece''']
def __init__( self :Optional[int], *snake_case :Dict, **snake_case :Optional[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Tuple =['''sentencepiece''']
def __init__( self :Any, *snake_case :Any, **snake_case :Tuple):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : List[str] =['''sentencepiece''']
def __init__( self :Tuple, *snake_case :Any, **snake_case :Tuple):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : str =['''sentencepiece''']
def __init__( self :Dict, *snake_case :Any, **snake_case :List[Any]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] =['''sentencepiece''']
def __init__( self :Any, *snake_case :List[str], **snake_case :Dict):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Dict =['''sentencepiece''']
def __init__( self :Any, *snake_case :Optional[Any], **snake_case :Optional[int]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Any =['''sentencepiece''']
def __init__( self :int, *snake_case :str, **snake_case :int):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : int =['''sentencepiece''']
def __init__( self :List[str], *snake_case :Any, **snake_case :Optional[int]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Any =['''sentencepiece''']
def __init__( self :Any, *snake_case :Optional[Any], **snake_case :Optional[int]):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
class SCREAMING_SNAKE_CASE_ ( metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =['''sentencepiece''']
def __init__( self :int, *snake_case :Optional[Any], **snake_case :Tuple):
"""simple docstring"""
requires_backends(self, ['sentencepiece'])
| 557 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module):
    r"""2D downsampling block with cross-attention transformer layers."""
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlockaD(nn.Module):
    r"""Plain 2D downsampling block (resnets only)."""
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlockaD(nn.Module):
    r"""2D upsampling block with cross-attention transformer layers."""
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlockaD(nn.Module):
    r"""Plain 2D upsampling block (resnets only)."""
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    r"""UNet mid block: one resnet followed by (attention + resnet) pairs."""
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
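
# --- Illustrative usage (appendix, not in the original) ---
# A minimal init/apply sketch for the plain down block. It assumes the
# companion FlaxResnetBlockaD/FlaxDownsampleaD modules follow the diffusers
# convention of NHWC inputs and a Dense-projected time embedding:
#
#     import jax
#     block = FlaxDownBlockaD(in_channels=32, out_channels=32)
#     sample = jnp.zeros((1, 8, 8, 32))   # (batch, height, width, channels)
#     temb = jnp.zeros((1, 128))          # time embedding
#     params = block.init(jax.random.PRNGKey(0), sample, temb)
#     hidden, skips = block.apply(params, sample, temb)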
| 557 | 1 |
'''simple docstring'''
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value``, or its derivative when ``deriv`` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after training a single weight towards ``expected``."""
    # Random initial weight (an odd integer in [1, 199])
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
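
# --- Illustrative usage (appendix, not in the original) ---
# Non-interactive call: training towards 100 for 100_000 steps drives the
# single weight so that the returned value approaches the expected one, e.g.
#
#     forward_propagation(100, 100_000)  # -> roughly 100.0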
| 5 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class TestSummarizationDataProcessing(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
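
# --- Illustrative usage (appendix, not in the original) ---
# The helpers under test can also be used directly, e.g.:
#
#     truncate_or_pad([1, 2, 3], 5, 0)           # -> [1, 2, 3, 0, 0]
#     build_mask(torch.tensor([1, 2, 0, 0]), 0)  # -> tensor([1, 1, 0, 0])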
| 362 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_funnel'] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_funnel'] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')


@total_ordering
@dataclass
class Version:
    """Dataset version ``MAJOR.MINOR.PATCH``."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 107 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *_snake_case : Any ,**_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
pass
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : Optional[Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase ( self : str ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ : List[str] = DepthEstimationPipeline(model=_snake_case ,image_processor=_snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase ( self : str ,_snake_case : Optional[Any] ,_snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : int = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,_snake_case )
import datasets
lowercase__ : str = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
lowercase__ : Union[str, Any] = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] ,_snake_case ,)
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@slow
@require_torch
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowercase__ : int = '''Intel/dpt-large'''
lowercase__ : Tuple = pipeline('''depth-estimation''' ,model=_snake_case )
lowercase__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowercase__ : Dict = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.662 )
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 560 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""CLIP-style image processor: resize, center-crop, rescale and normalize images."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 306 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Depth-first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list | 306 | 1 |
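
# Usage sketch for the strongly-connected-components helpers above (a minimal
# example, not part of the original file). `test_graph_1` has one cycle {0,1,2}
# plus two singletons; `test_graph_2` has the two cycles {0,1,2} and {3,4,5}.
# Vertex order inside each component depends on DFS order.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]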
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract base class that every CLI subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's arguments to the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
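
# A minimal sketch (not from the original file) of how a concrete subcommand
# might implement the abstract interface above. The command name "echo" and its
# behaviour are made up for illustration; `parser` is assumed to be the
# subparsers action the CLI entry point passes in.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text", type=str, help="Text to print back.")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self._text = text

    def run(self):
        print(self._text)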
| 9 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict can be read back from README.md / dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    """A DatasetInfo written to disk reloads equal to the original."""
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the YAML-serializable fields."""
    dataset_info = DatasetInfo(
        description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    """A DatasetInfosDict written to disk reloads equal to the original."""
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes precedence over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowercase_ , '''README.md''' ) )
| 674 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given (batch, length) shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
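
if __name__ == "__main__":
    # Usage sketch (not from the original test file): `floats_list` builds a
    # ragged batch of per-example value lists, which the padding tests above
    # rely on. The lengths here are arbitrary.
    example_batch = [floats_list((length, 2)) for length in (3, 5, 7)]
    print([len(x) for x in example_batch])  # [3, 5, 7]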
| 712 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map timm parameter names to HF DeiT parameter names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
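
# A quick illustration (not part of the original script) of what the rename
# table above produces; `DeiTConfig(num_hidden_layers=2)` is just a small
# hypothetical config for inspection.
def _print_rename_keys_demo():
    demo_config = DeiTConfig(num_hidden_layers=2)
    for src, dest in create_rename_keys(demo_config):
        print(f"{src} -> {dest}")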
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate q, k, v tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HF design."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("""tiny"""):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small"""):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base"""):
        pass
    elif deit_name[4:].startswith("""large"""):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 481 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["PerceiverFeatureExtractor"]
_lowerCamelCase : List[str] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 429 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(self.languages)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
}
| 429 | 1 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Zeller's congruence: return the weekday for an mm-dd-yyyy or mm/dd/yyyy date."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
    print(zeller(args.date_input))
| 710 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = "src/diffusers"
a_ = "."
# This is to make sure the diffusers module imported is the one in the repo.
a_ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ = spec.loader.load_module()
def _should_continue(line, indent):
    """Whether `line` is still part of the block that starts at `indent`."""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the indentation of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black code formatter to `code`, preserving any leading indent."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check if the code commented as a copy in `filename` matches the original.

    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """Run the consistency check on every Python file under the diffusers source tree."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 92 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Pick an output size close to `output_size`, with both sides a multiple of `multiple`."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    r"""DPT-style image processor: constrained resize, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to a target size constrained to a multiple of `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model logits into per-image semantic segmentation maps."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
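
# Usage sketch (not part of the original module): the constrained resize above
# keeps the aspect ratio and snaps both sides to a multiple, here 32. The input
# is a dummy channels-first array standing in for a real image.
if __name__ == "__main__":
    dummy = np.zeros((3, 480, 640))
    print(get_resize_output_image_size(dummy, output_size=(384, 384), keep_aspect_ratio=True, multiple=32))
    # (384, 512): the smaller scale factor (0.8) is applied to both sides.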
| 82 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to a string in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 469 | 0 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
) | 267 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters in `sentence`."""
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 267 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares weights with an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Rewrite fairseq MoE parameter names to the HF NLLB-MoE naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Load each expert checkpoint in turn and write it out as its own shard."""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
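
def _shard_name_demo():
    # Usage sketch (not from the original script): the naming scheme the
    # sharder above uses, shown on a hypothetical 3-shard split.
    num_shards = 3
    for idx in range(num_shards):
        print(WEIGHTS_NAME.replace(".bin", f"-{idx + 1:05d}-of-{num_shards:05d}.bin"))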
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 43 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __a ( _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase_ : List[Any] = PoolFormerImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> str:
"""simple docstring"""
UpperCamelCase = PoolFormerImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std" ) )
def _SCREAMING_SNAKE_CASE ( self : int )-> List[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Any:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Dict:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> List[str]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : str )-> str:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 554 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
UpperCAmelCase_ : Optional[int] = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
UpperCAmelCase_ : Dict = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
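# Pillow 9.1 moved the resampling filters under PIL.Image.Resampling; the version check
# above keeps a single lookup table working on both older and newer Pillow releases.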
def _lowerCAmelCase ( _a : Tuple ) -> Optional[Any]:
lowerCAmelCase_ : int = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ : List[str] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase_ : Tuple = numpy_to_pil(_a )
return images
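# The helper above rescales model outputs from [-1, 1] to [0, 1], moves channels last,
# and delegates to the numpy -> PIL converter defined below.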
def _lowerCAmelCase ( _a : Optional[Any] ) -> Union[str, Any]:
if images.ndim == 3:
lowerCAmelCase_ : str = images[None, ...]
lowerCAmelCase_ : Optional[int] = (images * 2_55).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCAmelCase_ : Any = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
lowerCAmelCase_ : str = [Image.fromarray(_a ) for image in images]
return pil_images
| 440 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase__ ( __A ):
__UpperCamelCase = ["""vqvae"""]
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , ):
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase , mel=_lowercase , vqvae=_lowercase )
def UpperCAmelCase__ ( self ):
return 50 if isinstance(self.scheduler , _lowercase ) else 1_000
@torch.no_grad()
def __call__( self , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = 0 , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase=True , ):
lowerCAmelCase_ : Tuple = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowercase )
lowerCAmelCase_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ : Tuple = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowercase , device=self.device , )
lowerCAmelCase_ : List[str] = noise
lowerCAmelCase_ : List[str] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowercase , _lowercase )
lowerCAmelCase_ : Tuple = self.mel.audio_slice_to_image(_lowercase )
lowerCAmelCase_ : List[Any] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ : Optional[Any] = (input_image / 255) * 2 - 1
lowerCAmelCase_ : Optional[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ : List[Any] = self.vqvae.encode(torch.unsqueeze(_lowercase , 0 ) ).latent_dist.sample(
generator=_lowercase )[0]
lowerCAmelCase_ : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ : str = self.scheduler.add_noise(_lowercase , _lowercase , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ : Union[str, Any] = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ : str = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ : List[Any] = self.scheduler.add_noise(_lowercase , _lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _lowercase ):
lowerCAmelCase_ : List[Any] = self.unet(_lowercase , _lowercase , _lowercase )["""sample"""]
else:
lowerCAmelCase_ : Any = self.unet(_lowercase , _lowercase )["""sample"""]
if isinstance(self.scheduler , _lowercase ):
lowerCAmelCase_ : str = self.scheduler.step(
model_output=_lowercase , timestep=_lowercase , sample=_lowercase , eta=_lowercase , generator=_lowercase , )["""prev_sample"""]
else:
lowerCAmelCase_ : List[str] = self.scheduler.step(
model_output=_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ : Optional[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used during training to ensure unit variance
lowerCAmelCase_ : Dict = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ : Dict = self.vqvae.decode(_lowercase )["""sample"""]
lowerCAmelCase_ : List[Any] = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ : Optional[Any] = (images * 255).round().astype("""uint8""" )
lowerCAmelCase_ : str = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
lowerCAmelCase_ : str = [self.mel.image_to_audio(_lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowercase ) )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase = 50 ):
assert isinstance(self.scheduler , _lowercase )
self.scheduler.set_timesteps(_lowercase )
lowerCAmelCase_ : List[Any] = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ : List[str] = (sample / 255) * 2 - 1
lowerCAmelCase_ : Optional[Any] = torch.Tensor(_lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ : int = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ : Optional[int] = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ : Any = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ : Optional[int] = 1 - alpha_prod_t
lowerCAmelCase_ : Union[str, Any] = self.unet(_lowercase , _lowercase )["""sample"""]
lowerCAmelCase_ : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
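# Note: the loop above walks the scheduler timesteps in reverse (torch.flip), running the
# DDIM update backwards to map an existing image to its latent noise; this is the
# deterministic encode step that complements the generation loop in __call__.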
@staticmethod
def UpperCAmelCase__ ( _lowercase , _lowercase , _lowercase ):
lowerCAmelCase_ : Optional[int] = acos(torch.dot(torch.flatten(_lowercase ) , torch.flatten(_lowercase ) ) / torch.norm(_lowercase ) / torch.norm(_lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowercase ) + sin(alpha * theta ) * xa / sin(_lowercase )
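# Spherical linear interpolation between two flattened tensors at weight `alpha`:
#   slerp(alpha) = sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1
# where theta is the angle between them, computed from the normalized dot product above.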
| 440 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCAmelCase : Optional[int] = datasets.utils.logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = ["""names""", """prefix"""]
UpperCAmelCase : List[Any] = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
UpperCAmelCase : str = ["""encoding_errors""", """on_bad_lines"""]
UpperCAmelCase : Tuple = ["""date_format"""]
@dataclass
class __lowercase ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase : Dict = ","
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Tuple = "infer"
UpperCamelCase : List[str] = None
UpperCamelCase : Dict = None
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : int = None
UpperCamelCase : str = True
UpperCamelCase : List[Any] = None
UpperCamelCase : Tuple = None
UpperCamelCase : List[Any] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : int = False
UpperCamelCase : List[str] = None
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Tuple = None
UpperCamelCase : int = True
UpperCamelCase : Union[str, Any] = True
UpperCamelCase : Tuple = False
UpperCamelCase : Dict = True
UpperCamelCase : Dict = None
UpperCamelCase : Any = "."
UpperCamelCase : Tuple = None
UpperCamelCase : int = "\""
UpperCamelCase : Any = 0
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Optional[Any] = None
UpperCamelCase : List[Any] = None
UpperCamelCase : Optional[Any] = None
UpperCamelCase : List[Any] = True
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = 0
UpperCamelCase : List[Any] = True
UpperCamelCase : Optional[int] = False
UpperCamelCase : List[Any] = None
UpperCamelCase : List[str] = 1_0_0_0_0
UpperCamelCase : List[Any] = None
UpperCamelCase : int = "strict"
UpperCamelCase : Any = "error"
UpperCamelCase : Optional[Any] = None
def __A ( self ) -> str:
'''simple docstring'''
if self.delimiter is not None:
lowerCamelCase = self.delimiter
if self.column_names is not None:
lowerCamelCase = self.column_names
@property
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value;
# some others are deprecated, and we can likewise skip them when they equal the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , a_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
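# Example (illustrative): with the defaults above this yields kwargs such as
#   {"sep": ",", "header": "infer", "quotechar": '"', "quoting": 0, "chunksize": 10000, ...}
# after the no-default, deprecated, and newer-pandas-only keys have been stripped.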
class __lowercase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCamelCase : Optional[int] = CsvConfig
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A ) -> Any:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
lowerCamelCase = data_files
if isinstance(a_ , a_ ):
lowerCamelCase = [files]
lowerCamelCase = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
lowerCamelCase = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
lowerCamelCase = [files]
lowerCamelCase = [dl_manager.iter_files(a_ ) for file in files]
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
lowerCamelCase = self.config.features.arrow_schema
if all(not require_storage_cast(a_ ) for feature in self.config.features.values() ):
# cheaper cast
lowerCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowerCamelCase = table_cast(a_ , a_ )
return pa_table
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowerCamelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(a_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
lowerCamelCase = pd.read_csv(a_ , iterator=a_ , dtype=a_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(a_ ):
lowerCamelCase = pa.Table.from_pandas(a_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a_ )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(a_ )}: {e}' )
raise
| 457 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase : List[str] = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase : List[str] = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase : str = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-value pattern: "bla": ["foo", "bar"]
lowerCAmelCase : Optional[int] = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase : Optional[int] = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase : str = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase : List[Any] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase : Dict = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase : Tuple = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase : Optional[int] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase : Optional[int] = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase : Optional[int] = re.compile(R"""^\s*else:""")
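# The helper below normalizes a backend guard line into a sorted "_and_"-joined key,
# e.g. (illustrative) "if not (is_torch_available() and is_vision_available()):"
# -> "torch_and_vision".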
def _A ( A ) -> List[Any]:
if _re_test_backend.search(A ) is None:
return None
lowercase : Union[str, Any] = [b[0] for b in _re_backend.findall(A )]
backends.sort()
return "_and_".join(A )
def _A ( A ) -> Optional[Any]:
with open(A ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
lowercase : Tuple = f.readlines()
lowercase : List[str] = 0
while line_index < len(A ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase : Union[str, Any] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowercase : Any = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A ):
lowercase : int = _re_one_line_import_struct.search(A ).groups()[0]
lowercase : Union[str, Any] = re.findall(R"\[([^\]]+)\]" ,A )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowercase : Optional[Any] = _re_import_struct_key_value.search(A )
if single_line_import_search is not None:
lowercase : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(A ) > 0]
objects.extend(A )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowercase : Tuple = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all the associated objects.
lowercase : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowercase : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(A ) is not None:
objects.append(_re_import_struct_add_one.search(A ).groups()[0] )
elif _re_import_struct_add_many.search(A ) is not None:
lowercase : Dict = _re_import_struct_add_many.search(A ).groups()[0].split(", " )
lowercase : Any = [obj[1:-1] for obj in imports if len(A ) > 0]
objects.extend(A )
elif _re_between_brackets.search(A ) is not None:
lowercase : Optional[Any] = _re_between_brackets.search(A ).groups()[0].split(", " )
lowercase : Tuple = [obj[1:-1] for obj in imports if len(A ) > 0]
objects.extend(A )
elif _re_quote_object.search(A ) is not None:
objects.append(_re_quote_object.search(A ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 1_2 + "\"" ):
objects.append(line[1_3:-3] )
line_index += 1
lowercase : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part; first grab the objects without a specific backend
lowercase : str = []
while (
line_index < len(A )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowercase : int = lines[line_index]
lowercase : Any = _re_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase : Optional[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(A ):
# If the line is an if is_backend_available, we grab all the associated objects.
lowercase : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowercase : List[Any] = lines[line_index]
lowercase : Tuple = _re_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
lowercase : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _A ( A ,A ) -> List[str]:
def find_duplicates(A ):
return [k for k, v in collections.Counter(A ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase : str = []
for key in import_dict_objects.keys():
lowercase : List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase : List[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase : Optional[int] = "base imports" if key == "none" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def _A ( ) -> List[Any]:
lowercase : Any = []
for root, _, files in os.walk(A ):
if "__init__.py" in files:
lowercase : Optional[Any] = os.path.join(A ,"__init__.py" )
lowercase : Tuple = parse_init(A )
if objects is not None:
lowercase : Tuple = analyze_results(*A )
if len(A ) > 0:
lowercase : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(A ) )
if len(A ) > 0:
raise ValueError("\n\n".join(A ) )
def _A ( ) -> Union[str, Any]:
lowercase : Dict = []
for path, directories, files in os.walk(A ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(A )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(A ) / folder).glob("*.py" ) ) ) == 0:
continue
lowercase : str = str((Path(A ) / folder).relative_to(A ) )
lowercase : Optional[Any] = short_path.replace(os.path.sep ,"." )
submodules.append(A )
for fname in files:
if fname == "__init__.py":
continue
lowercase : Tuple = str((Path(A ) / fname).relative_to(A ) )
lowercase : int = short_path.replace(".py" ,"" ).replace(os.path.sep ,"." )
if len(submodule.split("." ) ) == 1:
submodules.append(A )
return submodules
lowerCAmelCase : Dict = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def _A ( ) -> Optional[int]:
# This is to make sure the transformers module imported is the one in the repo.
lowercase : int = importlib.util.spec_from_file_location(
"transformers" ,os.path.join(A ,"__init__.py" ) ,submodule_search_locations=[PATH_TO_TRANSFORMERS] ,)
lowercase : Optional[int] = spec.loader.load_module()
lowercase : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(A ) > 0:
lowercase : Optional[int] = "\n".join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 372 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __UpperCamelCase :
@staticmethod
def _a ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowercase_ : str = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
__A = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowercase = pipeline(
"""document-question-answering""" , model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowercase = INVOICE_URL
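# apply_tesseract OCRs the image into (words, normalized boxes); zip(*...) pairs them
# into per-word (word, box) tuples so later calls can skip OCR by passing word_boxes.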
lowercase = list(zip(*apply_tesseract(load_image(_lowerCAmelCase ) , _lowerCAmelCase , """""" ) ) )
lowercase = """What is the placebo?"""
lowercase = [
{
"""image""": load_image(_lowerCAmelCase ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = dqa_pipeline(_lowerCAmelCase , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
[
{"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase ), """start""": ANY(_lowerCAmelCase ), """end""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase ), """start""": ANY(_lowerCAmelCase ), """end""": ANY(_lowerCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _a ( self ) -> str:
'''simple docstring'''
lowercase = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
lowercase = INVOICE_URL
lowercase = """How many cats are there?"""
lowercase = [
{"""score""": 0.0001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCAmelCase , decimals=4 ) , _lowerCAmelCase )
lowercase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(_lowerCAmelCase , decimals=4 ) , _lowerCAmelCase )
# No text can be detected in this image, so layoutlmv2 should fail
# and probably return an empty answer.
lowercase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(_lowerCAmelCase , [] )
# We can optionally pass the words and bounding boxes directly
lowercase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase = []
lowercase = []
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , words=_lowerCAmelCase , boxes=_lowerCAmelCase , top_k=2 )
self.assertEqual(_lowerCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _a ( self ) -> str:
'''simple docstring'''
lowercase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
lowercase = INVOICE_URL
lowercase = """What is the invoice number?"""
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowercase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowercase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
lowercase = INVOICE_URL
lowercase = """What is the invoice number?"""
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowercase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowercase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=_lowerCAmelCase )
lowercase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=_lowerCAmelCase , revision="""3dc6de3""" , )
lowercase = INVOICE_URL
lowercase = """What is the invoice number?"""
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
lowercase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
lowercase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
lowercase = list(zip(*apply_tesseract(load_image(_lowerCAmelCase ) , _lowerCAmelCase , """""" ) ) )
# This model should also work if `image` is set to None
lowercase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=_lowerCAmelCase )
lowercase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=_lowerCAmelCase , revision="""3dc6de3""" , max_seq_len=50 , )
lowercase = INVOICE_URL
lowercase = """What is the invoice number?"""
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
lowercase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
lowercase = list(zip(*apply_tesseract(load_image(_lowerCAmelCase ) , _lowerCAmelCase , """""" ) ) )
# This model should also work if `image` is set to None
lowercase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def _a ( self ) -> str:
'''simple docstring'''
lowercase = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
lowercase = INVOICE_URL
lowercase = """What is the invoice number?"""
lowercase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def _a ( self ) -> Dict:
'''simple docstring'''
pass
| 653 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 653 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase: Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase( __a : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , __a , )
if isinstance(__a , torch.Tensor ):
return image
elif isinstance(__a , PIL.Image.Image ):
a__ =[image]
if isinstance(image[0] , PIL.Image.Image ):
a__ , a__ =image[0].size
a__ , a__ =(x - x % 8 for x in (w, h)) # resize to integer multiple of 8
a__ =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
a__ =np.concatenate(__a , axis=0 )
a__ =np.array(__a ).astype(np.floataa ) / 2_55.0
a__ =image.transpose(0 , 3 , 1 , 2 )
a__ =2.0 * image - 1.0
a__ =torch.from_numpy(__a )
elif isinstance(image[0] , torch.Tensor ):
a__ =torch.cat(__a , dim=0 )
return image
def _lowercase( __a : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(__a , torch.Tensor ):
return mask
elif isinstance(__a , PIL.Image.Image ):
a__ =[mask]
if isinstance(mask[0] , PIL.Image.Image ):
a__ , a__ =mask[0].size
a__ , a__ =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32
a__ =[np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
a__ =np.concatenate(__a , axis=0 )
a__ =mask.astype(np.floataa ) / 2_55.0
a__ =0
a__ =1
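# (binarization: in the upstream diffusers RePaint pipeline the two assignments above
# threshold the mask at 0.5, i.e. mask[mask < 0.5] = 0 and mask[mask >= 0.5] = 1)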
a__ =torch.from_numpy(__a )
elif isinstance(mask[0] , torch.Tensor ):
a__ =torch.cat(__a , dim=0 )
return mask
class lowercase_ (lowercase__ ):
snake_case =42
snake_case =42
def __init__( self , lowercase_ , lowercase_) -> Tuple:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 250 , lowercase_ = 0.0 , lowercase_ = 10 , lowercase_ = 10 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
a__ =image
a__ =_preprocess_image(lowercase_)
a__ =original_image.to(device=self.device , dtype=self.unet.dtype)
a__ =_preprocess_mask(lowercase_)
a__ =mask_image.to(device=self.device , dtype=self.unet.dtype)
a__ =original_image.shape[0]
# sample Gaussian noise to begin the loop
if isinstance(lowercase_ , lowercase_) and len(lowercase_) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowercase_)}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
a__ =original_image.shape
a__ =randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(lowercase_ , lowercase_ , lowercase_ , self.device)
a__ =eta
a__ =self.scheduler.timesteps[0] + 1
a__ =generator[0] if isinstance(lowercase_ , lowercase_) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
if t < t_last:
# predict the noise residual
a__ =self.unet(lowercase_ , lowercase_).sample
# compute previous image: x_t -> x_t-1
a__ =self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
a__ =self.scheduler.undo_step(lowercase_ , lowercase_ , lowercase_)
a__ =t
a__ =(image / 2 + 0.5).clamp(0 , 1)
a__ =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__ =self.numpy_to_pil(lowercase_)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_)
| 20 |
def _lowercase( __a : list[int] ):
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
a__ , a__ =numbers[j], numbers[i]
return numbers
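# Exchange sort compares every pair (i, j) with j > i and swaps out-of-order values,
# so it always performs O(n^2) comparisons; e.g. exchange_sort([3, 1, 2]) -> [1, 2, 3].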
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """deberta-v2"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2_8_1_0_0 , SCREAMING_SNAKE_CASE_ : List[str]=1_5_3_6 , SCREAMING_SNAKE_CASE_ : List[Any]=2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2_4 , SCREAMING_SNAKE_CASE_ : Any=6_1_4_4 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=1E-7 , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Dict=-1 , SCREAMING_SNAKE_CASE_ : Optional[int]=0 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : Tuple = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Any = type_vocab_size
lowerCAmelCase_ : List[str] = initializer_range
lowerCAmelCase_ : Any = relative_attention
lowerCAmelCase_ : Tuple = max_relative_positions
lowerCAmelCase_ : Dict = pad_token_id
lowerCAmelCase_ : Dict = position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE_ ) == str:
lowerCAmelCase_ : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase_ : Tuple = pos_att_type
lowerCAmelCase_ : str = vocab_size
lowerCAmelCase_ : List[str] = layer_norm_eps
lowerCAmelCase_ : Any = kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = pooler_dropout
lowerCAmelCase_ : Optional[Any] = pooler_hidden_act
class UpperCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase_ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase_ : str = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
return 1_2
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] = -1 , SCREAMING_SNAKE_CASE_ : Any = -1 , SCREAMING_SNAKE_CASE_ : Optional[int] = -1 , SCREAMING_SNAKE_CASE_ : Optional[Any] = False , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Tuple = 3 , SCREAMING_SNAKE_CASE_ : Any = 4_0 , SCREAMING_SNAKE_CASE_ : Optional[Any] = 4_0 , SCREAMING_SNAKE_CASE_ : List[str] = None , ):
lowerCAmelCase_ : int = super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 720 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = VQModel
    main_input_name = "sample"
    @property
    def dummy_input( self , sizes=(3_2, 3_2) ):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
    @property
    def input_shape( self ):
        return (3, 3_2, 3_2)
    @property
    def output_shape( self ):
        return (3, 3_2, 3_2)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self ):
        pass
    def test_training( self ):
        pass
    def test_from_pretrained_hub( self ):
        model , loading_info = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        model = VQModel.from_pretrained('fusing/vqgan-dummy' )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
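# Note added for readers: the nine expected values above are a recorded slice of
# the decoder output under enable_full_determinism() and seed 0; atol=1e-3 is
# the test's own tolerance, so small backend-to-backend drift still passes.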
| 317 | 0 |
'''simple docstring'''
class Node:
    def __init__( self , val ):
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    # Recursive in-order traversal: left subtree, current node, right subtree.
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    # Build a binary search tree from the input, then read it back in order.
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
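    # Quick check of the demo above (added; easy to verify by hand):
    # tree_sort([10, 1, 3, 2, 9, 14, 13]) returns [1, 2, 3, 9, 10, 13, 14].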
| 168 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ):
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
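# Hedged usage sketch (added): the defaults above resize the shortest edge to
# 224, center-crop (with padding if needed) to 256x256, rescale to [0, 1] and
# flip RGB->BGR, so one PIL image should yield a (1, 3, 256, 256) tensor.
#
#   processor = MobileViTImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")  # pil_image assumed defined
#   batch["pixel_values"].shape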
| 595 | 0 |
'''simple docstring'''
from math import sqrt
def solution( limit : int = 1_0_0_0_0_0_0 ) -> int:
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
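    # Sanity check (added; the figures are quoted from the Project Euler 86
    # statement, so treat them as an external claim): with 1975 cuboids at
    # M=99 and 2060 at M=100, solution(1_999) should return 100.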
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
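# How the lazy module pays off (added sketch; requires transformers installed
# with torch): importing one name triggers only that submodule's import.
#
#   from transformers import LongT5ForConditionalGeneration
#   model = LongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")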
| 216 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config( PretrainedConfig ):
    model_type = 'deberta-v2'
    def __init__( self , vocab_size=128100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
    @property
    def default_onnx_opset( self ) -> int:
        return 12
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
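# Hedged usage sketch (added; the task name and behavior follow transformers'
# ONNX export API rather than anything shown in this file):
#
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config, task="sequence-classification")
#   onnx_config.inputs  # OrderedDict mapping input names to their dynamic axes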
| 79 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_graph_mode_with_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
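# Worked example of the seq-length arithmetic used in the tester above (added):
# with the defaults image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225
# patches, so the expected sequence length is 225 + 1 = 226 once the [CLS]
# token is prepended.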
| 79 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path( model_type , use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_path , file_name ):
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f'{model_type}_small' if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f'extra keys found: {extra_keys}' )
    if len(missing_keys ) != 0:
        raise ValueError(f'missing keys: {missing_keys}' )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss' )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
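# Example invocation (added; the script file name is assumed, the flags come
# from the argparse definition above):
#   python convert_suno_to_hf.py text ./bark-text-converted --is_small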
| 714 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ):
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images' , total=num_class_images )
    with open(f'{class_data_dir}/caption.txt' , 'w' ) as f1, open(f'{class_data_dir}/urls.txt' , 'w' ) as f2, open(
        f'{class_data_dir}/images.txt' , 'w' ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , 'wb' ) as f:
                        f.write(img.content )
                    f1.write(images['caption'] + '\n' )
                    f2.write(images['url'] + '\n' )
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + '\n' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('' , add_help=False )
    parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=True , type=str )
    parser.add_argument('--class_data_dir' , help='path to save images' , required=True , type=str )
    parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
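# Example invocation (added; script name assumed, flags from the parser above):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_dog --num_class_images 200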
| 145 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc : Callable[[float], float] , x_start : float , x_end : float , steps : int = 100 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f( x: float ) -> float:
        return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
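    # Sanity check (added, not in the original demo): a straight line y = x from
    # 0 to 1 has exact length sqrt(2) ~ 1.41421, and every chord lies on the
    # curve, so line_length(lambda x: x, 0, 1) matches it for any step count.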
| 36 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def components( self ):
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('_' )}
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
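# Hedged usage sketch (added; loading mechanics assumed to follow diffusers'
# custom-pipeline convention, with this file registered as the custom pipeline):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse")  # one image per v1.x checkpoint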
| 596 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
        q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias
        state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
        gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
        state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1
        state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['model']
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 161 |
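# Example invocation (added; script name assumed, the checkpoint URL is the
# argparse default above):
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base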
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''openbmb/cpm-ant-10b''': 1024,
}
def load_vocab( vocab_file ):
    # Loads a plain-text vocabulary file into an ordered token -> index mapping.
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens ):
        return ''.join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
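# Worked example of the greedy longest-match-first loop in
# WordpieceTokenizer.tokenize (added; the tiny vocabulary is invented purely
# for illustration):
#
#   wp = WordpieceTokenizer(vocab={"un": 0, "run": 1, "runnable": 2})
#   wp.tokenize("unrunnable")   # -> ["un", "runnable"]: "un" matches first,
#                               #    then the longest remaining match wins
#   wp.tokenize("xyz")          # -> ["<unk>", "<unk>", "<unk>"]: no matches,
#                               #    so it falls back one character at a time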