import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    """CLIP-based classifier that flags generated images containing NSFW concepts."""

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
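
# Usage sketch (illustrative, not from the original module): how a diffusion
# pipeline typically drives the checker. The checkpoint name and the
# CLIPImageProcessor preprocessing step are assumptions, not part of this file.
#
#   from transformers import CLIPImageProcessor
#   safety_checker = StableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker"
#   )
#   feature_extractor = CLIPImageProcessor.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker"
#   )
#   clip_input = feature_extractor(images, return_tensors="pt").pixel_values
#   checked_images, has_nsfw = safety_checker(clip_input, images)
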
"""Deterministic Miller-Rabin primality test (probabilistic above ~3.32e24)."""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Return True if n is (probably) prime, False if it is definitely composite."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
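
# Worked example of the decomposition above (not in the original source): for
# n = 561, n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4. Each base `prime` is
# then tested via pow(prime, 35 * 2**r, 561) for r in 0..3; 561 (a Carmichael
# number) fails for base 2 and is correctly reported composite.
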
def test_miller_rabin() -> None:
    """Check a composite and a prime within each analysed bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838201)
    assert miller_rabin(838207)
    # 1_373_653
    assert not miller_rabin(17316001)
    assert miller_rabin(17316017)
    # 25_326_001
    assert not miller_rabin(3078386641)
    assert miller_rabin(3078386653)
    # 3_215_031_751
    assert not miller_rabin(1713045574801)
    assert miller_rabin(1713045574819)
    # 2_152_302_898_747
    assert not miller_rabin(2779799728307)
    assert miller_rabin(2779799728327)
    # 3_474_749_660_383
    assert not miller_rabin(113850023909441)
    assert miller_rabin(113850023909527)
    # 341_550_071_728_321
    assert not miller_rabin(1275041018848804351)
    assert miller_rabin(1275041018848804391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79666464458507787791867)
    assert miller_rabin(79666464458507787791951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552840677446647897660333)
    assert miller_rabin(552840677446647897660359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
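
# Worked example (illustrative) on the 2 x 2 matrix from make_matrix(2):
#   make_matrix(2)             -> [[1, 2], [3, 4]]
#   rotate_90(make_matrix(2))  -> [[2, 4], [1, 3]]   (90 degrees counterclockwise)
#   rotate_180(make_matrix(2)) -> [[4, 3], [2, 1]]
#   rotate_270(make_matrix(2)) -> [[3, 1], [4, 2]]
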
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for ALBERT models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
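
# Usage sketch (illustrative; outside the transformers package the relative
# imports above would need adjusting):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("Hello world!")["input_ids"]
#   text = tokenizer.decode(ids)  # round-trips through the SentencePiece model
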
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
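
# Shape sketch for the encode/decode tests above (illustrative): the VAE maps
# each spatial dimension down by a factor of 8 into 4 latent channels, so a
# (B, 3, 512, 512) image encodes to (B, 4, 64, 64) latents and decode inverts it:
#
#   vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#   latents = vae.encode(image).latent_dist.sample()   # (B, 4, 64, 64)
#   recon = vae.decode(latents).sample                 # (B, 3, 512, 512)
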
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
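
# Example invocation (script name and paths are illustrative, not from the source):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted
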
"""simple docstring"""
from ....utils import logging
_lowerCAmelCase :List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case_ ):
'''simple docstring'''
def __init__( self , A , A=None , A=2_0_4_8 ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = config.__dict__
_UpperCAmelCase : List[Any] = modal_hidden_size
if num_labels:
_UpperCAmelCase : str = num_labels
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCamelCase_ ():
_UpperCAmelCase : int = HfArgumentParser(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase : Union[str, Any] = TensorFlowBenchmark(args=UpperCamelCase__ )
try:
_UpperCAmelCase : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCAmelCase : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
_UpperCAmelCase : Tuple = ''' '''.join(str(UpperCamelCase__ ).split(''' ''' )[:-1] )
_UpperCAmelCase : int = ''''''
_UpperCAmelCase : List[Any] = eval(str(UpperCamelCase__ ).split(''' ''' )[-1] )
_UpperCAmelCase : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(UpperCamelCase__ )
raise ValueError(UpperCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
'''Tokenization class for model PEGASUS (fast).'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special (eos, pad, ...) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
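
# Usage sketch (illustrative):
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["A document to summarize."], return_tensors="pt")
#   # build_inputs_with_special_tokens terminates every sequence with </s> (eos)
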
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions,
        references,
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
from __future__ import annotations


def all_unique(data: list) -> bool:
    """Return True if every element of ``data`` is distinct."""
    return len(set(data)) == len(data)
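
# Examples (illustrative):
#   all_unique([1, 2, 3])  -> True
#   all_unique([1, 2, 2])  -> False
#   all_unique([])         -> True
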
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
a = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
a = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
a = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
a = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
a = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _snake_case ( _snake_case : Dict , _snake_case : Dict ) -> str:
'''simple docstring'''
for tf_name, hf_name in patterns:
_A = k.replace(_snake_case , _snake_case )
return k
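
# Illustrative rename under DECODER_PATTERNS (the TF key shown is hypothetical
# but follows the checkpoint layout the patterns target):
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
#       -> "model.decoder.layers.0.self_attn.q_proj.weight"
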
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder')}

    for k, v in tqdm(decoder_weights.items(), 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(i in k for i in ['dense', 'query', 'key', 'value']):
            v = v.T  # TF kernels are transposed relative to torch Linear weights
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''

    for k, v in tqdm(remaining_weights.items(), 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(i in k for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''

    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight')
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a = parser.parse_args()
a = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
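# Example invocation (script name and paths are placeholders):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir ./bigbird_pegasus_converted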
| 315 |
"""simple docstring"""
from manim import *
class lowercase_ (Scene):
'''simple docstring'''
    def construct(self):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 315 | 1 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), F"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(F"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}""")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
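# A minimal usage sketch (made-up arrays, not project data):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}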
| 280 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
| 280 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265 |
import functools
def A_(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word's index has overflowed - delete the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index has overflowed - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
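# Quick sanity check (made-up inputs): the edit distance between "kitten" and
# "sitting" is 3 (substitute k->s, substitute e->i, append g).
#   A_("kitten", "sitting")  # -> 3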
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 0 |
def lowerCAmelCase_(hex_num: str):
    """simple docstring"""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("""-""" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'},
    )
    line_by_line: bool = field(
        default=False,
        metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'},
    )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'}
    )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether or not to use whole word mask.'})
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'}
    )
    block_size: int = field(
        default=-1,
        metadata={
            'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in blocks of this size for training. '
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """simple docstring"""

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set whole word masking and mlm to True for Chinese Whole Word Mask""")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
            """or remove the --do_eval argument.""")

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            """ --overwrite_output_dir to overcome.""")

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
            """ script, save it, and load it from here, using --tokenizer_name""")

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("""Training new model from scratch""")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
            """ --mlm flag (masked language modeling).""")

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["""eval_loss"""])
        result = {"""perplexity""": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, """eval_results_lm.txt""")
        if trainer.is_world_master():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key in sorted(result.keys()):
                    logger.info(""" %s = %s""", key, str(result[key]))
                    writer.write("""%s = %s\n""" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
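# Example invocation (model name and paths are placeholders):
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file /path/to/train.txt \
#       --do_train \
#       --output_dir ./lm_output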
if __name__ == "__main__":
main()
| 82 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
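# Usage sketch: with the lazy module registered above, `from transformers import
# XmodModel` resolves `modeling_xmod` (and hence torch) only on first attribute access.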
| 64 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_12, 10_24, 20_48, 40_96]
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
    def prepare_image_processor_dict(self):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        img_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
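        # Each flattened patch row holds patch_height * patch_width * num_channels pixel
        # values plus 2 extra slots, which Pix2Struct uses for the patch's (row, column)
        # position in the image grid.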
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 | 0 |
"""simple docstring"""
test_graph_a = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_b = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
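# Usage sketch with the module's own test graphs:
#   strongly_connected_components(test_graph_a)  # -> [[0, 1, 2], [3], [4]]
#   strongly_connected_components(test_graph_b)  # -> [[0, 2, 1], [3, 5, 4]]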
| 203 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = (TFBlipTextModel,) if is_tf_available() else ()
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[Any] = BlipTextModelTester(self)
a__: int = ConfigTester(self , config_class=lowercase , hidden_size=37)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='Blip does not use inputs_embeds')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Tuple = TFBlipTextModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def lowerCamelCase_ ( self , lowercase=True) -> str:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=lowercase)
| 203 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = '▁'
__a = {'vocab_file': 'sentencepiece.bpe.model'}
__a = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
__a = {
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    """Construct an NLLB tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        In legacy mode: no prefix, suffix = [eos, src_lang_code].
        In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting.
        In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
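

# Minimal usage sketch (assumes the "facebook/nllb-200-distilled-600M" checkpoint
# referenced above is available; `forced_bos_token_id` steers generation toward
# the target language):
# tokenizer = NllbTokenizer.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
# # input_ids begin with the source language code and end with the </s> token.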
| 66 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation,
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 192 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __lowerCamelCase ( a_ : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE :List[Any] = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def __lowerCamelCase ( a_ : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = emb.weight.shape
__SCREAMING_SNAKE_CASE :Optional[Any] = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__SCREAMING_SNAKE_CASE :List[Any] = emb.weight.data
return lin_layer
def __lowerCamelCase ( a_ : List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE :Optional[Any] = torch.load(snake_case_ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = Namespace(**checkpoint['''cfg''']['''model'''] )
__SCREAMING_SNAKE_CASE :Tuple = checkpoint['''model''']
remove_ignore_keys_(snake_case_ )
__SCREAMING_SNAKE_CASE :Dict = state_dict['''decoder.embed_tokens.weight'''].shape[0]
__SCREAMING_SNAKE_CASE :str = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
__SCREAMING_SNAKE_CASE :Optional[int] = XGLMConfig(
vocab_size=snake_case_ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
__SCREAMING_SNAKE_CASE :Any = XGLMForCausalLM(snake_case_ )
__SCREAMING_SNAKE_CASE :str = model.load_state_dict(snake_case_ , strict=snake_case_ )
print(snake_case_ )
__SCREAMING_SNAKE_CASE :Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
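

# Example of programmatic use (the paths are hypothetical placeholders):
#   model = convert_fairseq_xglm_checkpoint_from_disk("/path/to/xglm/model.pt")
#   model.save_pretrained("./xglm-hf")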
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__A = parser.parse_args()
__A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 368 |
"""simple docstring"""
import math
import random
def __lowerCamelCase ( a_ : float , a_ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCamelCase_ = 0.02
def __lowerCamelCase ( a_ : int , a_ : int ) -> float:
__SCREAMING_SNAKE_CASE :Any = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(a_ ):
# Forward propagation
__SCREAMING_SNAKE_CASE :Any = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__SCREAMING_SNAKE_CASE :Tuple = (expected / 1_00) - layer_a
# Error delta
__SCREAMING_SNAKE_CASE :Union[str, Any] = layer_1_error * sigmoid_function(a_ , a_ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
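

# Rough sanity check (stochastic because the initial weight is random, but the
# output drifts toward `expected` as the number of propagations grows):
# >>> result = forward_propagation(32, 450_000)
# >>> 31 < result < 33
# True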
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = int(input("Expected value: "))
lowerCamelCase_ = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations)) | 239 | 0 |
import requests

giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs for a given search query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 277 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 277 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
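
# Note: with `_LazyModule` installed in `sys.modules`, an import such as
# `from transformers.models.xmod import XmodModel` resolves lazily; the heavy
# `modeling_xmod` module (and torch) is only loaded on first attribute access.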
| 357 |
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Check whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
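

# e.g. is_chinese("身高") -> 1 (every character is CJK),
#      is_chinese("a180") -> 0 (contains non-CJK characters).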
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            # Try the longest possible whole-word match first.
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Mark the continuation characters of the matched word.
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
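

# Illustration (hand-checked against the loop above): with LTP word segments
# {"今天", "天气"}, the BERT tokens ["今", "天", "天", "气"] become
# ["今", "##天", "天", "##气"]; continuation characters of a whole word are
# marked with "##" so that whole-word masking can group them later.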
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)BERT, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # To fine-tune these models, the same LTP tokenizer must be used (https://github.com/HIT-SCIR/ltp).
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
    main(args)
| 207 | 0 |
"""simple docstring"""
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
lowerCAmelCase_ : Tuple = 6
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Union[str, Any] = 1901
lowerCAmelCase_ : Union[str, Any] = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
lowerCAmelCase_ : List[Any] = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
lowerCAmelCase_ : Tuple = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
lowerCAmelCase_ : Any = day - days_per_month[month - 2]
if month > 12:
year += 1
lowerCAmelCase_ : List[str] = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
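

# Known result for Project Euler problem 19:
# >>> solution()
# 171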
if __name__ == "__main__":
print(solution())
| 241 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : List[str] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Union[str, Any] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Union[str, Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Dict = ["""flax"""]
def __init__( self : Any , *a_ : Optional[int] , **a_ : str ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : str , *a_ : Optional[int] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Dict , **a_ : str ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[int] , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Dict , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Tuple , *a_ : Optional[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[int] , *a_ : List[Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : str , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Any , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Optional[int] , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : int = ["""flax"""]
def __init__( self : Dict , *a_ : str , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Any , *a_ : Any , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Dict , **a_ : Dict ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Any , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : List[Any] , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Tuple , *a_ : Optional[int] , **a_ : Union[str, Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[str] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Dict ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : List[str] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Optional[int] , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
| 241 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
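
# Hedged usage sketch (not part of the original test file; `truncation=True` is an
# illustrative tokenizer kwarg): drives the batch-translation helper directly.
#
#   test = TFPegasusIntegrationTests()
#   summaries = test.translate_src_text(truncation=True)
#   assert summaries == test.expected_text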
| 353 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """
        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        >>> PrefixSum([1, 2, 3]).get_sum(1, 2)
        5
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """
        >>> PrefixSum([1, 2, 3]).contains_sum(6)
        True
        >>> PrefixSum([1, 2, 3]).contains_sum(7)
        False
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
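    # Minimal usage sketch (illustrative; uses only the class defined above):
    # prefix sums give O(1) range-sum queries after O(n) preprocessing.
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10  # 1 + 2 + 3 + 4
    assert ps.get_sum(1, 2) == 5   # 2 + 3
    assert ps.contains_sum(7)      # the subarray [3, 4] sums to 7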
| 316 | 0 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _snake_case ( self ):
lowercase__: List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: str = WavaVecaConfig()
lowercase__: Union[str, Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
lowercase__: Optional[int] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''vocab.json''' ) )
lowercase__: str = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Union[str, Any] = WavaVecaFeatureExtractor()
lowercase__: List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase__: Union[str, Any] = WavaVecaProcessor(_UpperCAmelCase , _UpperCAmelCase )
# save in new folder
processor.save_pretrained(_UpperCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''r''' ) as f:
lowercase__: Dict = json.load(_UpperCAmelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''w''' ) as f:
f.write(json.dumps(_UpperCAmelCase ) )
lowercase__: str = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Union[str, Any] = WavaVecaFeatureExtractor()
lowercase__: Any = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase__: Optional[int] = WavaVecaProcessor(_UpperCAmelCase , _UpperCAmelCase )
# save in new folder
processor.save_pretrained(_UpperCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''r''' ) as f:
lowercase__: List[str] = json.load(_UpperCAmelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''w''' ) as f:
f.write(json.dumps(_UpperCAmelCase ) )
lowercase__: Union[str, Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Union[str, Any] = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_UpperCAmelCase )
# copy relevant files
copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''w''' ) as f:
f.write('''{}''' )
lowercase__: List[str] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_UpperCAmelCase ):
lowercase__: Any = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
lowercase__: Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_UpperCAmelCase )
lowercase__: str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_UpperCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
lowercase__: List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
lowercase__: Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase__: Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
lowercase__: Tuple = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _snake_case ( self ):
try:
AutoConfig.register('''custom''' , _UpperCAmelCase )
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__: Union[str, Any] = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: List[str] = os.path.join(_UpperCAmelCase , '''vocab.txt''' )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase__: Tuple = CustomTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = CustomProcessor(_UpperCAmelCase , _UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_UpperCAmelCase )
lowercase__: List[str] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = False
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :str = False
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = "AutoFeatureExtractor"
_UpperCAmelCase :int = "AutoTokenizer"
_UpperCAmelCase :Optional[int] = False
try:
AutoConfig.register('''custom''' , _UpperCAmelCase )
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# If remote code is not set, the default is to use local classes.
lowercase__: int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase__: Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase__: List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
lowercase__: str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _snake_case ( self ):
lowercase__: Any = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _snake_case ( self ):
lowercase__: Optional[Any] = WavaVecaProcessor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_UpperCAmelCase , '''test-processor''' ) , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
lowercase__: List[Any] = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(new_processor.feature_extractor , _UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ):
lowercase__: Optional[Any] = WavaVecaProcessor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_UpperCAmelCase , '''test-processor-org''' ) , push_to_hub=_UpperCAmelCase , use_auth_token=self._token , organization='''valid_org''' , )
lowercase__: List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(new_processor.feature_extractor , _UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase__: Optional[Any] = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , '''vocab.txt''' )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase__: List[Any] = CustomTokenizer(_UpperCAmelCase )
lowercase__: Any = CustomProcessor(_UpperCAmelCase , _UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase__: List[Any] = Repository(_UpperCAmelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(_UpperCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_UpperCAmelCase , '''tokenizer_config.json''' ) ) as f:
lowercase__: int = json.load(_UpperCAmelCase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
lowercase__: Any = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
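
# Hedged sketch (not from the original file): the custom registration pattern the
# tests above exercise. `MyConfig`/`MyFeatureExtractor`/`MyTokenizer`/`MyProcessor`
# are hypothetical names used only for illustration.
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#   AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
#   AutoProcessor.register(MyConfig, MyProcessor)
#   processor = AutoProcessor.from_pretrained("path/to/saved/my-model")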
| 177 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
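
# Illustration of the shortest-edge rule computed above (values assumed): for an
# image of width 18 and height 36 with shortest_edge=18, the width stays 18 and
# the height keeps the aspect ratio, int(18 * 36 / 18) == 36.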
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))
@slow
def _snake_case ( self ):
# prepare image and target
lowercase__: Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase__: Any = json.loads(f.read() )
lowercase__: Dict = {'''image_id''': 39769, '''annotations''': target}
# encode them
lowercase__: Dict = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase__: Any = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
lowercase__: Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase )
lowercase__: Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
lowercase__: Tuple = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) )
# verify boxes
lowercase__: str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase )
lowercase__: List[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
lowercase__: Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) )
# verify is_crowd
lowercase__: Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) )
# verify class_labels
lowercase__: Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) )
# verify orig_size
lowercase__: List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) )
# verify size
lowercase__: List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
@slow
def _snake_case ( self ):
# prepare image, target and masks_path
lowercase__: str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase__: str = json.loads(f.read() )
lowercase__: List[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
lowercase__: Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase__: Union[str, Any] = YolosImageProcessor(format='''coco_panoptic''' )
lowercase__: Optional[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
lowercase__: Optional[int] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase )
lowercase__: Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
lowercase__: str = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) )
# verify boxes
lowercase__: List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase )
lowercase__: List[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
lowercase__: int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) )
# verify is_crowd
lowercase__: int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) )
# verify class_labels
lowercase__: Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) )
# verify masks
lowercase__: Union[str, Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _UpperCAmelCase )
# verify orig_size
lowercase__: List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) )
# verify size
lowercase__: Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
| 177 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the alphanumeric fraction of the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by scanning the first
    lines for keywords and counting occurrences of "config"/"test" against a
    threshold proportional to the number of lines."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config/test files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
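
# Hedged CLI sketch (flag names inferred from the `args.*` attributes used below;
# they are defined in `PreprocessingArguments`, which is not shown here):
#
#   python preprocessing.py --dataset_name <hub-dataset> --tokenizer_dir <tokenizer-dir> \
#       --output_dir ./codeparrot-clean --near_deduplication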
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 278 |
from math import factorial
def combinations(n, k) -> int:
    """
    Returns the number of different combinations of k length which can be made
    from n values, where n >= k.
    """
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
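    # Cross-check (illustrative): Python's `math.comb` implements the same
    # n! / (k! * (n - k)!) formula, so the two must agree.
    import math

    assert combinations(52, 5) == math.comb(52, 5) == 2_598_960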
| 278 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
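
# Worked example of the `size_divisor` rounding in `resize` above (illustrative values):
#   height, width = 518, 333
#   new_h = 518 // 32 * 32   # = 512
#   new_w = 333 // 32 * 32   # = 320
# so every output side is a multiple of `size_divisor`.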
| 330 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
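
# Worked example (hypothetical TF key, shown only to illustrate the substitution
# order): rename_state_dict_key("pegasus/encoder/layer_0/intermediate/dense/kernel",
# REMAINING_PATTERNS) first applies ("/", "."), ("layer_", "layers."),
# ("kernel", "weight") and ("pegasus", "model"), then ("intermediate.dense", "fc1"),
# yielding "model.encoder.layers.0.fc1.weight".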
| 330 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The number of nodes should be the same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
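    # Minimal usage sketch (illustrative): a root holding 3 coins with two empty
    # children needs exactly two moves, one coin to each child.
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2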
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
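
# Hedged note (illustrative): with the _LazyModule indirection above, importing the
# package stays cheap and the torch-backed module is only imported on first access:
#
#   from transformers.models.nllb_moe import NllbMoeConfig  # config only
#   from transformers.models.nllb_moe import NllbMoeModel   # triggers the modeling import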
| 0 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
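
# Hedged usage sketch (illustrative; mirrors how a CLI prompt would drive this menu):
#
#   menu = BulletMenu("In which compute environment are you running?",
#                     ["This machine", "AWS (Amazon SageMaker)"])
#   index = menu.run(default_choice=0)   # blocks until the user selects an entry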
| 27 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax array or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)
    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
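# Illustrative sketch (not part of the original module): concrete output types
# are declared as dataclasses whose trailing fields default to None, e.g.:
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Optional[Any] = None
#       logits: Any = None
#
#   out = SampleOutput(logits=some_tensor)
#   out.logits == out["logits"] == out[0]   # attribute, key and index access agree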
class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)
    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
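# Illustrative usage (not part of the original module): ContextManagers enters a
# dynamic list of context managers as one block, e.g.:
#
#   from contextlib import nullcontext
#   with ContextManagers([nullcontext(), nullcontext()]):
#       pass  # runs with every manager entered; all are exited afterwards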
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v
    return dict(_flatten_dict(d, parent_key, delimiter))
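# Illustrative usage (not part of the original module):
#
#   flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
#   # -> {"a.b": 1, "a.c.d": 2}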
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        import jax.numpy as jnp
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        import jax.numpy as jnp
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")
def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        import jax.numpy as jnp
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")
def expand_dims(array, dim):
    if is_numpy_array(array):
        return np.expand_dims(array, dim)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=dim)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.expand_dims(array, axis=dim)
    elif is_jax_tensor(array):
        import jax.numpy as jnp
        return jnp.expand_dims(array, axis=dim)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
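# Illustrative usage (not part of the original module): the helpers above
# dispatch on the tensor's framework, so numpy arrays work out of the box:
#
#   x = np.zeros((1, 3, 4))
#   squeeze(x, axis=0).shape        # (3, 4)
#   expand_dims(x, 0).shape         # (1, 1, 3, 4)
#   reshape(x, (3, 4)).shape        # (3, 4)
#   transpose(x, (2, 1, 0)).shape   # (4, 3, 1)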
def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
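# Illustrative usage (not part of the original module), assuming transformers
# model classes are importable in the environment:
#
#   from transformers import BertForSequenceClassification
#   infer_framework(BertForSequenceClassification)   # "pt"
#   find_labels(BertForSequenceClassification)       # ["labels"]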
| 237 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
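# Illustrative usage (not part of the original file): the config instantiates
# like any PretrainedConfig, and stage_names is derived from depths, e.g.:
#
#   config = FocalNetConfig(embed_dim=128, depths=[2, 2, 18, 2])
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']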
| 357 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
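# Illustrative extension (not in the original script): since every rank
# contributes torch.ones(1) to the all_reduce above, the reduction could be
# asserted explicitly, e.g.:
#
#   t = torch.ones(1).to(device)
#   dist.all_reduce(t, op=dist.ReduceOp.SUM)
#   assert t.item() == world_size, f"all_reduce mismatch: {t.item()} != {world_size}"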
| 37 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID: the trailing letter must equal
    LOOKUP_LETTERS[number % 23].
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
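# Worked example (illustrative): for "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so the ID validates:
#
#   assert is_spain_national_id("12345678Z")
#   assert not is_spain_national_id("12345678T")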
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
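# Illustrative usage (not part of the original file): the attribute_map above
# lets generic code read decoder sizes through the common names, e.g.:
#
#   config = TrOCRConfig()
#   config.hidden_size            # 1024, aliased to config.d_model
#   config.num_attention_heads    # 16, aliased to config.decoder_attention_heads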
| 227 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 2 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
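# Worked example (illustrative): with the toy merges above, "react" has no
# merge covering "a c", so it splits into "re@@ a@@ c@@ t", while "readapt"
# applies "r e", "a d", "a p", "ap t</w>" and "ad apt</w>" to yield
# "re@@ adapt"; hence "adapt react readapt apt" tokenizes to
# ["adapt", "re@@", "a@@", "c@@", "t", "re@@", "adapt", "apt"].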
| 2 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 237 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,)
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 193 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    # ds_b is the digit sum of b, c the low-order part, when a(i) = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
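# Illustrative cross-check (not part of the original solution): for small n,
# the memoized solver should agree with a direct simulation of the sequence
# a(i+1) = a(i) + digitsum(a(i)):
def _brute_force(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term
# e.g. _brute_force(10) == 62, and solution(10) should match.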
| 22 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 22 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 30 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : str =DebertaVaTokenizer
A__ : List[str] =DebertaVaTokenizerFast
A__ : Any =True
A__ : str =True
def A_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = 'this is a test'
SCREAMING_SNAKE_CASE__ = 'this is a test'
return input_text, output_text
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = '<pad>'
SCREAMING_SNAKE_CASE__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(UpperCAmelCase_ ) , 30001 )
def A_ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def A_ ( self : Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = ' \tHeLLo!how \n Are yoU? '
SCREAMING_SNAKE_CASE__ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def A_ ( self : Any ):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def A_ ( self : Tuple ):
pass
def A_ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Tuple ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = ' \tHeLLo!how  \n Are yoU?  '
        tokens_target = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = 'This is a test'
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        back_tokens_target = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        # same checks with the fast tokenizer
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        back_tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id], encoded_pair, )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [
                [1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2],
                [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2] + [0] * 55,
                [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2] + [0] * 72,
            ],
            'token_type_ids': [[0] * 84, [0] * 84, [0] * 84],
            'attention_mask': [[1] * 84, [1] * 29 + [0] * 55, [1] * 12 + [0] * 72],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 176 | 0 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's match extends past the current right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index marks the start of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
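    # A quick illustrative check (not part of the original file):
    # the pattern "aba" occurs twice in "abacaba".
    assert find_pattern("aba", "abacaba") == 2
    print(z_function("abacaba"))  # [0, 0, 1, 0, 3, 0, 1]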
| 351 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``")
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()


if __name__ == "__main__":
    main()
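# Example invocation (illustrative only; the dataset, split, and model names below
# are assumptions, not taken from the original script):
#
#   python run_pretrain.py \
#       --model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
#       --dataset_name="librispeech_asr" \
#       --dataset_config_name="clean" \
#       --train_split_name="train.100" \
#       --max_duration_in_seconds=20.0 \
#       --output_dir="./wav2vec2-pretrained" \
#       --do_train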
| 99 | 0 |
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
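    # A tiny illustrative check (not part of the original file): the root holds
    # 3 coins and both children hold 0, so exactly two moves are required.
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_tree) == 2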
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
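# Note (illustrative, not part of the original file): `_LazyModule` defers the
# heavy torch-backed imports until an attribute is first accessed, e.g.
#
#   from transformers.models.nllb_moe import NllbMoeConfig  # no torch import yet
#   config = NllbMoeConfig()  # the configuration module is imported lazily here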
| 0 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init-weights function, kept for compatibility with the library.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
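# Minimal usage sketch (illustrative, not part of the original module; "resnet18"
# is just an example timm architecture name):
#
#   from transformers import TimmBackboneConfig, TimmBackbone
#   import torch
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   model = TimmBackbone(config)
#   outputs = model(torch.randn(1, 3, 224, 224))
#   print([f.shape for f in outputs.feature_maps])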
| 57 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    # overwrite since phonemes require a purpose-built sequence
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None)
        input_text = "Hello how are you"
        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)
        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaPhonemeCTCTokenizerOutput))
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"])
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16])
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17])

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_from_list = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})
            self.assertListEqual(outputs_batch["text"], outputs_batch_from_list["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_from_list["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
| 57 | 1 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : str ) -> str:
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
lowerCamelCase_ : Optional[Any] =""
while len(lowerCamelCase__ ) % 3 != 0:
lowerCamelCase_ : Any ="0" + bin_string
lowerCamelCase_ : int =[
bin_string[index : index + 3]
for index in range(len(lowerCamelCase__ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
lowerCamelCase_ : int =0
for index, val in enumerate(lowerCamelCase__ ):
oct_val += int(2 ** (2 - index) * int(lowerCamelCase__ ) )
oct_string += str(lowerCamelCase__ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
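    # Quick illustrative check (not part of the original file): 0b1111 == 0o17.
    assert bin_to_octal("1111") == "17"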
| 144 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # `find_labels` works on subclasses as well
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 144 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .safilesystem import SaFileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
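# Minimal usage sketch (illustrative, not part of the original module; the
# bucket and path names are made up):
#
#   extract_path_from_uri("s3://my-bucket/train")  # -> "my-bucket/train"
#   extract_path_from_uri("relative/path")         # -> "relative/path"
#   local_fs = fsspec.filesystem("file")
#   is_remote_filesystem(local_fs)                 # -> False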
def _reset_fsspec_lock() -> None:
    # Clear the reference to fsspec's event loop and IO thread; otherwise
    # HTTPFileSystem can hang inside an ML training loop with older fsspec.
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 369 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionXLImgaImgPipeline
SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
SCREAMING_SNAKE_CASE : Optional[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : Any ) -> Any:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=snake_case , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__UpperCAmelCase : int = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__UpperCAmelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
__UpperCAmelCase : Tuple = CLIPTextModel(snake_case )
__UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=snake_case )
__UpperCAmelCase : Optional[Any] = CLIPTextModelWithProjection(snake_case )
__UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=snake_case )
__UpperCAmelCase : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase__ ( self : Dict , snake_case : Optional[int] , snake_case : List[str]=0 ) -> List[str]:
__UpperCAmelCase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
__UpperCAmelCase : Optional[Any] = image / 2 + 0.5
if str(snake_case ).startswith('''mps''' ):
__UpperCAmelCase : List[str] = torch.manual_seed(snake_case )
else:
__UpperCAmelCase : Any = torch.Generator(device=snake_case ).manual_seed(snake_case )
__UpperCAmelCase : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[Any] = self.get_dummy_components()
__UpperCAmelCase : Any = StableDiffusionXLImgaImgPipeline(**snake_case )
__UpperCAmelCase : Union[str, Any] = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
__UpperCAmelCase : Optional[int] = self.get_dummy_inputs(snake_case )
__UpperCAmelCase : int = sd_pipe(**snake_case ).images
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Any = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : Any ) -> int:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
pass
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : List[Any] = StableDiffusionXLImgaImgPipeline(**snake_case )
__UpperCAmelCase : Tuple = sd_pipe.to(snake_case )
__UpperCAmelCase : Tuple = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
# forward without prompt embeds
__UpperCAmelCase : Tuple = self.get_dummy_inputs(snake_case )
__UpperCAmelCase : Optional[Any] = 3 * ['''this is a negative prompt''']
__UpperCAmelCase : Optional[int] = negative_prompt
__UpperCAmelCase : Tuple = 3 * [inputs['''prompt''']]
__UpperCAmelCase : Optional[Any] = sd_pipe(**snake_case )
__UpperCAmelCase : int = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # make sure the outputs with and without precomputed prompt embeds match
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ) -> Tuple:
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
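# Illustrative aside (added, not part of the original test file): the input
# helpers above seed a device-bound torch.Generator, which is what makes the
# hard-coded 3x3 slice checks reproducible, e.g.:
#
#     generator = torch.Generator(device="cpu").manual_seed(0)
#     latents = torch.randn((1, 4, 64, 64), generator=generator)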
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : int = IFInpaintingPipeline
lowerCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
lowerCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components(self : List[Any] ):
'''simple docstring'''
return self._get_dummy_components()
    def get_dummy_inputs(self : List[Any] , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
self._test_save_load_local()
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])')
lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])')
lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)')
lowerCamelCase : List[Any] = re.compile(R'(_{2,})')
lowerCamelCase : str = R'^\w+(\.\w+)*$'
lowerCamelCase : Dict = R'<>:/\|?*'
def camelcase_to_snakecase (name ) -> str:
    """simple docstring"""
    name = _uppercase_uppercase_re.sub(R'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(R'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase (name ) -> str:
    """simple docstring"""
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def filename_prefix_for_name (name ) -> str:
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split (name , split ) -> str:
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'." )
    return f"{filename_prefix_for_name(name )}-{split}"
def filepattern_for_dataset_split (dataset_name , split , data_dir , filetype_suffix=None ) -> str:
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir , prefix )
    return f"{filepath}*"
def filenames_for_dataset_split (path , dataset_name , split , filetype_suffix=None , shard_lengths=None ) -> list:
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
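# Hedged usage sketch (added, not part of the original module; assumes the
# helpers above behave as reconstructed): shard filenames for a three-shard
# "train" split of a dataset stored as .arrow files.
if __name__ == "__main__":
    print(filenames_for_dataset_split('''/tmp''' , '''my_dataset''' , '''train''' , filetype_suffix='''arrow''' , shard_lengths=[100, 100, 50] ))
    # -> ['/tmp/my_dataset-train-00000-of-00003.arrow', ..., '/tmp/my_dataset-train-00002-of-00003.arrow']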
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude, angle, radian_mode = False ):
    # resolve a polar force of the given magnitude and angle into [x, y] components
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces, location, eps = 10**-1 ):
    # the sum of moments (location x force) must vanish for static equilibrium
    moments: NDArray[float64] = cross(location, forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
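    # Added illustration (not in the original file): a single 10 N force
    # pulling straight up at x = 2 m gives a net moment of 2 * 10 = 20 N*m
    # about the origin, so this system is *not* in static equilibrium.
    forces = array([polar_force(10, 90)])
    location = array([[2, 0]])
    assert not in_static_equilibrium(forces, location)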
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
TPosition = tuple[int, int]
class Node:
    '''simple docstring'''
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self ):
        """simple docstring"""
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , other ):
        """simple docstring"""
        return self.f_cost < other.f_cost
class AStar:
    '''simple docstring'''
    def __init__( self , start , goal ):
        """simple docstring"""
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ):
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self , parent ):
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ):
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    '''simple docstring'''
    def __init__( self , start , goal ):
        """simple docstring"""
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ):
        """simple docstring"""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        """simple docstring"""
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
"""simple docstring"""
def __init__( self :int , snake_case :Optional[int] , snake_case :Union[str, Any]=13 , snake_case :Optional[int]=7 , snake_case :int=True , snake_case :Any=True , snake_case :Union[str, Any]=True , snake_case :List[Any]=True , snake_case :int=99 , snake_case :Tuple=24 , snake_case :Any=2 , snake_case :Tuple=6 , snake_case :int=37 , snake_case :Optional[Any]="gelu" , snake_case :Optional[Any]=0.1 , snake_case :List[str]=0.1 , snake_case :str=512 , snake_case :List[str]=16 , snake_case :int=2 , snake_case :List[str]=0.02 , snake_case :int=3 , snake_case :Optional[Any]=None , snake_case :str=1_000 , ):
'''simple docstring'''
A_ : List[str] = parent
A_ : Union[str, Any] = batch_size
A_ : Tuple = seq_length
A_ : Any = is_training
A_ : Optional[int] = use_input_mask
A_ : Any = use_token_type_ids
A_ : int = use_labels
A_ : str = vocab_size
A_ : int = hidden_size
A_ : Dict = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : int = hidden_act
A_ : str = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : int = type_sequence_label_size
A_ : str = initializer_range
A_ : List[Any] = num_labels
A_ : List[str] = scope
A_ : str = range_bbox
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : Union[str, Any] = bbox[i, j, 3]
A_ : List[str] = bbox[i, j, 1]
A_ : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : Optional[int] = bbox[i, j, 2]
A_ : str = bbox[i, j, 0]
A_ : str = t
A_ : Any = None
if self.use_input_mask:
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Optional[int] = None
A_ : List[str] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Tuple = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Optional[Any] , snake_case :str , snake_case :Union[str, Any] , snake_case :Any , snake_case :Any , snake_case :List[str] , snake_case :Any , ):
'''simple docstring'''
A_ : str = LiltModel(config=snake_case )
model.to(snake_case )
model.eval()
A_ : str = model(snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A_ : int = model(snake_case , bbox=snake_case , token_type_ids=snake_case )
A_ : List[Any] = model(snake_case , bbox=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :str , snake_case :List[Any] , snake_case :Union[str, Any] , snake_case :Union[str, Any] , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , ):
'''simple docstring'''
A_ : Tuple = self.num_labels
A_ : List[str] = LiltForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
A_ : Dict = model(
snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :int , snake_case :Optional[Any] , snake_case :List[str] , snake_case :str , snake_case :str , snake_case :Any , snake_case :Union[str, Any] , ):
'''simple docstring'''
A_ : Optional[int] = LiltForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
A_ : Optional[int] = model(
snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class __magic_name__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :str , snake_case :List[str] ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : int = LiltModelTester(self )
A_ : Tuple = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Union[str, Any] = type
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = LiltModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
@slow
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(snake_case )
A_ : List[Any] = torch.tensor([[1, 2]] , device=snake_case )
A_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=snake_case )
# forward pass
with torch.no_grad():
A_ : str = model(input_ids=snake_case , bbox=snake_case )
A_ : List[str] = torch.Size([1, 2, 768] )
A_ : Any = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=snake_case , )
self.assertTrue(outputs.last_hidden_state.shape , snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , snake_case , atol=1e-3 ) )
from itertools import count
def solution(min_block_length: int = 50 ) -> int:
    # fill_count_functions[n] counts the ways to tile a row of length n
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
def exchange_sort(numbers: list[int] ) -> list[int]:
    # repeatedly swap any out-of-order pair until the whole list is sorted
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
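    # Added sanity checks (illustrative): exchange sort runs in O(n^2) time,
    # so it is only suitable for small inputs like the interactive one above.
    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert exchange_sort([-1, 0, 7]) == [-1, 0, 7]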
'''simple docstring'''
def alternative_string_arrange(first_str: str , second_str: str ) -> str:
    # interleave the two strings character by character, appending any leftover tail
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
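    # Added check (illustrative): characters interleave until the shorter
    # string is exhausted, then the rest of the longer one is appended.
    assert alternative_string_arrange('AB', 'XYZ') == 'AXBYZ'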
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ):
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
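    # Worked example (added for illustration): for a silicon junction with
    # N_d = 1e17, N_a = 1e17 and n_i = 1.5e10 (all cm^-3), the built-in
    # voltage is (kT/q) * ln(N_d * N_a / n_i**2) ~= 0.81 V at T = 300 K.
    print(f"{builtin_voltage(1e17, 1e17, 1.5e10) = }")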
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = parent
a__ : List[str] = batch_size
a__ : List[str] = image_size
a__ : Dict = patch_size
a__ : Optional[Any] = num_channels
a__ : List[Any] = is_training
a__ : str = use_labels
a__ : Dict = hidden_size
a__ : Tuple = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : Union[str, Any] = intermediate_size
a__ : List[str] = hidden_act
a__ : List[str] = hidden_dropout_prob
a__ : Any = attention_probs_dropout_prob
a__ : Dict = type_sequence_label_size
a__ : Tuple = initializer_range
a__ : Optional[int] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : List[str] = (image_size // patch_size) ** 2
a__ : Any = num_patches + 1
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ : Tuple = None
if self.use_labels:
a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : int = ViTMSNModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = self.type_sequence_label_size
a__ : List[str] = ViTMSNForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase , labels=lowercase)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a__ : List[str] = 1
a__ : Optional[int] = ViTMSNForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a__ : Dict = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Any = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__A : Tuple = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__A : List[str] = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : Any = False
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = ViTMSNModelTester(self)
a__ : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds')
def __lowercase ( self) -> Any:
'''simple docstring'''
pass
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Union[str, Any] = model_class(lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowercase)
a__ : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : str = [*signature.parameters.keys()]
a__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = ViTMSNModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def A_ ( ) -> Dict:
a__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(2)
a__ : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(lowercase)
a__ : Any = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Optional[Any] = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__ : Tuple = model(**lowercase)
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase)
a__ : Any = torch.tensor([-0.08_03, -0.44_54, -0.23_75]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
"""simple docstring"""
class SubArray :
"""simple docstring"""
def __init__( self , lowerCAmelCase__):
# we need a list not a string, so do something to change the type
__SCREAMING_SNAKE_CASE = arr.split(""",""")
    def solve_sub_array( self):
        # sum_value[i]: best subarray sum ending at i; rear[i]: best sum seen so far
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1 , len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1] , int(self.array[i]))
            rear[i] = max(sum_value[i] , rear[i - 1])
return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(("the results is:", re))
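    # Added check (illustrative): this is Kadane's algorithm, so for
    # "1,-2,3,4,-1" the best contiguous subarray is [3, 4] with sum 7.
    assert SubArray("1,-2,3,4,-1").solve_sub_array() == 7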
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args , take_from = None , standard_warn=True , stacklevel=2 ):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
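# Hedged usage sketch (added, not part of the original module): a method can
# accept a deprecated kwarg and route it through the helper above. The
# version string "999.0.0" below is illustrative only.
def _deprecation_demo(**kwargs):
    offset = deprecate("offset" , "999.0.0" , "Please pass `steps_offset` instead." , take_from=kwargs )
    return offset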
from __future__ import annotations
import math
def ucal( u: float, p: int ) -> float:
    '''simple docstring'''
    # product u * (u - 1) * ... * (u - p + 1) used by Newton's forward formula
    temp = u
    for i in range(1, p ):
        temp = temp * (u - i)
    return temp
def main( ) -> None:
    '''simple docstring'''
    n = int(input("enter the numbers of values: " ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print("enter the values of parameters in a list: " )
    x = list(map(float, input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
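# Worked example (added for illustration): with samples of y = x**2 + 1 at
# x = 0, 1, 2 the forward differences are [1, 3] and [2]; interpolating at
# value = 1 gives u = 1, so the series yields
# 1 + ucal(1, 1) * 1 / 1! + ucal(1, 2) * 2 / 2! = 1 + 1 + 0 = 2.
assert ucal(1.0, 2 ) == 0.0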
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowerCAmelCase : int = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = ['''pixel_values''']
def __init__( self : Dict , _snake_case : bool = True , _snake_case : Optional[Dict[str, int]] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : bool = True , _snake_case : Union[int, float] = 1 / 255 , _snake_case : bool = True , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , **_snake_case : Tuple , ):
super().__init__(**_snake_case )
__lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 256}
__lowercase : Dict = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowercase : Any = get_size_dict(_snake_case , param_name='''crop_size''' )
__lowercase : int = do_resize
__lowercase : Union[str, Any] = size
__lowercase : Optional[int] = resample
__lowercase : str = do_center_crop
__lowercase : str = crop_size
__lowercase : Optional[int] = do_rescale
__lowercase : str = rescale_factor
__lowercase : List[Any] = do_normalize
__lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self : List[str] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BICUBIC , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any , ):
__lowercase : Union[str, Any] = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__lowercase : Dict = get_resize_output_image_size(_snake_case , size=size['''shortest_edge'''] , default_to_square=_snake_case )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Union[str, Any] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ):
__lowercase : str = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : str , _snake_case : np.ndarray , _snake_case : float , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any ):
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Any , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ):
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Optional[Any] , _snake_case : ImageInput , _snake_case : Optional[bool] = None , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[float] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_snake_case : int , ):
__lowercase : List[str] = do_resize if do_resize is not None else self.do_resize
__lowercase : str = size if size is not None else self.size
__lowercase : Any = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowercase : str = resample if resample is not None else self.resample
__lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : List[str] = crop_size if crop_size is not None else self.crop_size
__lowercase : int = get_size_dict(_snake_case , param_name='''crop_size''' )
__lowercase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Any = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__lowercase : Dict = image_std if image_std is not None else self.image_std
__lowercase : Any = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase : List[str] = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
__lowercase : str = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
__lowercase : Dict = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
__lowercase : List[str] = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
__lowercase : str = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
__lowercase : Dict = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
__lowercase : List[str] = {'''pixel_values''': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
def snake_case_ ( self : Optional[int] , _snake_case : str , _snake_case : List[Tuple] = None ):
__lowercase : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_snake_case ):
__lowercase : str = target_sizes.numpy()
__lowercase : Union[str, Any] = []
for idx in range(len(_snake_case ) ):
__lowercase : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_snake_case )
__lowercase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_snake_case )
else:
__lowercase : str = logits.argmax(dim=1 )
__lowercase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
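# Hedged usage sketch (added, not part of the original file; the concrete
# processor class, model and checkpoint names below are illustrative
# assumptions, since the class above is a generic segmentation processor):
#
#     processor = SomeImageProcessor.from_pretrained("some/segmentation-checkpoint")
#     batch = processor(images=[pil_image], return_tensors="pt")
#     outputs = model(**batch)  # a semantic-segmentation model
#     seg_maps = processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[pil_image.size[::-1]]
#     )  # one (H, W) label map per image, resized back to the input resolution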
"""simple docstring"""
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class __lowerCamelCase :
'''simple docstring'''
    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ) -> Dict:
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def _UpperCAmelCase ( self , *args , **kwargs ) -> str:
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
def _UpperCAmelCase ( self ) -> Optional[int]:
return self.scheduler.get_last_lr()
def _UpperCAmelCase ( self ) -> List[Any]:
return self.scheduler.state_dict()
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
self.scheduler.load_state_dict(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
return self.scheduler.get_lr()
    def _UpperCAmelCase ( self , *args , **kwargs ) -> Union[str, Any]:
        return self.scheduler.print_lr(*args , **kwargs )
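# Hedged usage sketch (added, not part of the original module): wrap a plain
# torch scheduler so it only advances when the optimizer actually stepped
# (e.g. it is not skipped by mixed-precision gradient scaling). The names
# below are illustrative.
#
#     wrapped = AcceleratedScheduler(
#         torch.optim.lr_scheduler.StepLR(optimizer, step_size=10),
#         optimizers=optimizer,
#         step_with_optimizer=True,
#     )
#     wrapped.step()  # no-op while a gradient-sync / optimizer step is pending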
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
pass
def hashimage( image: Image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = DepthEstimationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
_a = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , __UpperCAmelCase )
import datasets
_a = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
_a = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , __UpperCAmelCase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[str]:
_a = '''Intel/dpt-large'''
_a = pipeline('''depth-estimation''' , model=__UpperCAmelCase )
_a = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
_a = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
# This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """simple docstring"""
    def __init__( self , vertices , edges ) -> None:
        """simple docstring"""
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight ) -> None:
        """simple docstring"""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ) -> Graph:
        """simple docstring"""
        subgraph: Graph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Return the saving from replacing the network with its minimum spanning tree."""
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_directory , filename)
    edges: dict[EdgeT, int] = {}
    with open(filepath) as f:
        data: list[str] = f.read().strip().split("""\n""")
    adjaceny_matrix = [line.split(""",""") for line in data]
    for edgea in range(1 , len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])
    graph: Graph = Graph(set(range(len(adjaceny_matrix))) , edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'{solution() = }')
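    # Tiny sanity sketch (added for illustration): for a triangle with edge
    # weights 1, 2, 3 the minimum spanning tree keeps the two cheapest edges,
    # so the saving is (1 + 2 + 3) - (1 + 2) = 3.
    demo_graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(demo_graph.edges.values()) - sum(demo_graph.prims_algorithm().edges.values()) == 3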
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig (datasets.BuilderConfig ):
    """simple docstring"""
    features: Optional[datasets.Features] = None
class Pandas (datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info( self ) -> datasets.DatasetInfo:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_DOCS = 'docs/source/en/tasks'
def _find_text_in_file( filename, start_prompt, end_prompt ):
    with open(filename, 'r', encoding='utf-8', newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
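# --- Illustrative sketch (added; not part of the original script) ---
# How `_find_text_in_file` behaves on a minimal file using the same prompts:
#
#   import tempfile
#   content = (
#       "intro\n"
#       "<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->\n"
#       "[Model A](../model_doc/a)\n"
#       "<!--End of the generated tip-->\n"
#   )
#   with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as tmp:
#       tmp.write(content)
#   text, start, end, lines = _find_text_in_file(
#       tmp.name,
#       start_prompt="<!--This tip is automatically generated",
#       end_prompt="<!--End of the generated tip-->",
#   )
#   assert text == "[Model A](../model_doc/a)\n"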
| 162 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        # Intentionally empty; the method name follows the usual pattern in these
        # test files but is an assumption, since the original name was lost.
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
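# --- Illustrative usage (added sketch, not from the original test file) ---
# Running the processor outside the test harness; the Hub id "facebook/levit-128S"
# is a stock checkpoint and its use here is an assumption:
#
#   from PIL import Image
#   import numpy as np
#   from transformers import LevitImageProcessor
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   # -> shape (1, 3, 224, 224) with the default 224x224 center crop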
| 162 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: int = 32,
        crop_n_points_downscale_factor: int = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 82 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83 | 0 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
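# --- Illustrative sketch (added; not part of the original file) ---
# Minimal use of the classes above; wrapped in a function so importing this
# module stays side-effect free.
def _quick_demo() -> None:
    demo = LinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)  # demo is now 1->2->3
    demo.reverse()
    assert str(demo) == "3->2->1"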
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """This section of the test uses varying data types for input."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 368 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
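    # --- Illustrative usage (added sketch, not from the original source) ---
    # Loading a trained checkpoint; "google/ddpm-cifar10-32" is a real Hub id,
    # but its use here is an assumption:
    #
    #   from diffusers import DDIMPipeline
    #   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    #   images = pipe(batch_size=1, num_inference_steps=50).images
    #   images[0].save("sample.png")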
| 311 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decode ``ciphertext`` with ``key``; return None on any invalid character."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Return every decoding (over all 3-letter lowercase keys) with only valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain ``common_word``."""
    return [possible for possible in possibles if common_word in possible.lower()]
def lowercase (snake_case__ : str = "p059_cipher.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
lowerCAmelCase = [int(snake_case__ ) for number in data.strip().split(""",""" )]
lowerCAmelCase = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
lowerCAmelCase = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
lowerCAmelCase = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 155 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowercase (snake_case__ : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
lowerCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
lowerCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
lowerCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 155 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init weights function to keep the class usable as a `PreTrainedModel`.
        pass
    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
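    # --- Illustrative usage (added sketch, not from the original source) ---
    # Building a backbone directly from a config; "resnet18" is a valid timm model
    # name, the remaining values are assumptions:
    #
    #   import torch
    #   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
    #   backbone = TimmBackbone(config)
    #   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps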
| 351 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 339 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 295 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to its square root)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` among differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
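# --- Sanity sketch (added for illustration) ---
# The candidates generated above are the differences of consecutive cubes,
# (n + 1)**3 - n**3 == 3*n**2 + 3*n + 1, whose consecutive gaps are 6*(n + 1) --
# exactly the `prime_candidate += 6 * cube_index` step.
def _check_candidate_pattern(limit: int = 5) -> None:
    candidate, index = 7, 1
    for n in range(1, limit + 1):
        assert candidate == (n + 1) ** 3 - n**3
        index += 1
        candidate += 6 * index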
if __name__ == "__main__":
print(F'{solution() = }')
| 295 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'module.blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'module.blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'module.blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'module.blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'module.blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    # NOTE: the HF-side key names below follow the standard ViT conversion pattern;
    # they were reconstructed, since the assignment targets were lost in the source.
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during MSN pre-training; drop its weights
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
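# --- Example invocation (added; mirrors the argparse flags defined below) ---
# The script file name is an assumption; the checkpoint URL is the default below.
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small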
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 366 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# NOTE: the model-specific class name is not recoverable from this excerpt, so a
# generic name is used here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 275 | 0 |
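# --- Illustrative usage of the image processor above (added sketch; the class
# name `ImageProcessor` is the stand-in noted on the class itself) ---
#
#   import numpy as np
#   processor = ImageProcessor()
#   dummy = np.zeros((3, 300, 400), dtype=np.uint8)
#   batch = processor(images=dummy, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)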
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: repeatedly swap adjacent out-of-order elements."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
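# Example (added for illustration; behaviour follows from the implementation above):
#     >>> bubble_sort([5, 2, 4, 1])
#     [1, 2, 4, 5]
#     >>> bubble_sort([])
#     []
# Each recursive pass bubbles one more element into place, so the worst case is O(n^2).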
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive helper: try every knight move from ``pos``, backtracking on failure."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find an open knight's tour on an n x n board; raise ValueError if none exists.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
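    # Demonstration: an open knight's tour exists on a 5x5 board. Each cell of
    # the returned matrix records the step (1..25) at which the knight lands on it.
    for row in open_knight_tour(5):
        print(row)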
| 315 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""OwlViTFeatureExtractor"""]
__lowerCamelCase : Optional[int] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
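    # With this lazy-module pattern, importing the package is cheap; the heavy
    # torch-dependent submodules are only loaded on first attribute access.
    # A rough usage sketch (assuming the usual transformers package layout):
    #
    #   from transformers.models import owlvit   # no torch import happens yet
    #   model_cls = owlvit.OwlViTModel           # triggers the real import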
| 286 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Dict , __A : Dict , __A : Dict , __A : Union[str, Any] , __A : Optional[int] , __A : Any , __A : Union[str, Any] , __A : Tuple ):
snake_case__ : Optional[int] = TFConvBertModel(config=__A )
snake_case__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : List[str] = [input_ids, input_mask]
snake_case__ : Union[str, Any] = model(__A )
snake_case__ : str = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Union[str, Any] , __A : List[Any] , __A : Any , __A : Union[str, Any] , __A : int , __A : Optional[Any] , __A : Dict , __A : Optional[int] ):
snake_case__ : List[str] = TFConvBertForMaskedLM(config=__A )
snake_case__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Tuple , __A : Union[str, Any] , __A : List[Any] , __A : Any , __A : List[Any] , __A : List[Any] , __A : Optional[int] , __A : List[str] ):
snake_case__ : Any = self.num_labels
snake_case__ : List[Any] = TFConvBertForSequenceClassification(config=__A )
snake_case__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : Optional[int] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : int , __A : List[Any] , __A : Union[str, Any] , __A : Optional[Any] , __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ):
snake_case__ : Optional[Any] = self.num_choices
snake_case__ : Any = TFConvBertForMultipleChoice(config=__A )
snake_case__ : Optional[int] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[Any] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[int] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
snake_case__ : Optional[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : List[str] , __A : Tuple , __A : str , __A : Union[str, Any] , __A : Union[str, Any] , __A : Any , __A : int , __A : Tuple ):
snake_case__ : Dict = self.num_labels
snake_case__ : str = TFConvBertForTokenClassification(config=__A )
snake_case__ : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : List[str] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[int] , __A : Union[str, Any] , __A : List[Any] , __A : List[str] , __A : Any , __A : Any , __A : Optional[int] , __A : Optional[Any] ):
snake_case__ : Any = TFConvBertForQuestionAnswering(config=__A )
snake_case__ : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : int = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
a_ = False
def _lowercase ( self : int ):
snake_case__ : Optional[Any] = TFConvBertModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def _lowercase ( self : List[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Any ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def _lowercase ( self : Dict ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def _lowercase ( self : Optional[int] ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase ( self : Dict ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
snake_case__ : int = True
if hasattr(__A , "use_cache" ):
snake_case__ : Optional[Any] = True
snake_case__ : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : List[str] = getattr(self.model_tester , "key_length" , __A )
for model_class in self.all_model_classes:
snake_case__ : Tuple = self._prepare_for_class(__A , __A )
snake_case__ : List[str] = model_class(__A )
snake_case__ : List[Any] = len(model(__A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A )
snake_case__ : str = os.path.join(__A , "saved_model" , "1" )
snake_case__ : str = tf.keras.models.load_model(__A )
snake_case__ : Optional[Any] = model(__A )
if self.is_encoder_decoder:
snake_case__ : Tuple = outputs["encoder_hidden_states"]
snake_case__ : str = outputs["encoder_attentions"]
else:
snake_case__ : Dict = outputs["hidden_states"]
snake_case__ : Tuple = outputs["attentions"]
self.assertEqual(len(__A ) , __A )
snake_case__ : int = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__A ) , __A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__A )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = True
snake_case__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
snake_case__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : Any = getattr(self.model_tester , "key_length" , __A )
snake_case__ : List[Any] = getattr(self.model_tester , "key_length" , __A )
def check_decoder_attentions_output(__A : Optional[int] ):
snake_case__ : Optional[Any] = len(__A )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__A : Any ):
snake_case__ : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = True
snake_case__ : Any = False
snake_case__ : Dict = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = len(__A )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
if self.is_encoder_decoder:
snake_case__ : str = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_decoder_attentions_output(__A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = model_class(__A )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
# Check attention is always last and order is fine
snake_case__ : Optional[int] = True
snake_case__ : List[Any] = True
snake_case__ : Any = model_class(__A )
snake_case__ : str = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) )
self.assertEqual(model.config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : int ):
snake_case__ : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
snake_case__ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : str = model(__A )[0]
snake_case__ : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __A )
snake_case__ : List[Any] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1e-4 )
| 286 | 1 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum all semidivisible numbers no greater than the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
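    # Sanity check for the helper sieve: all primes below 20.
    print(prime_sieve(20))  # [2, 3, 5, 7, 11, 13, 17, 19]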
| 18 |
def split(string: str, separator: str = " ") -> list:
    """Split the given string by the separator.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])

    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
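    # Quick demonstration of the separator handling above.
    print(split("apple#banana#cherry#orange", separator="#"))
    # ['apple', 'banana', 'cherry', 'orange']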
| 295 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
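# A minimal usage sketch for the pipeline above. "google/ddpm-cifar10-32" is
# one public unconditional checkpoint that can be loaded through DDIMPipeline;
# any compatible UNet/scheduler pair works the same way:
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ddim_sample.png")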
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :int = KandinskyVaaImgaImgPipeline
UpperCamelCase_ :Union[str, Any] = ["""image_embeds""", """negative_image_embeds""", """image"""]
UpperCamelCase_ :Dict = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
UpperCamelCase_ :Tuple = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase_ :int = False
@property
def UpperCAmelCase_ ( self )-> List[str]:
return 32
@property
def UpperCAmelCase_ ( self )-> List[Any]:
return 32
@property
def UpperCAmelCase_ ( self )-> Tuple:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self )-> Any:
return 100
@property
def UpperCAmelCase_ ( self )-> Tuple:
torch.manual_seed(0 )
UpperCamelCase_ = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase_ = UNetaDConditionModel(**_lowercase )
return model
@property
def UpperCAmelCase_ ( self )-> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self )-> Any:
torch.manual_seed(0 )
UpperCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.dummy_unet
UpperCamelCase_ = self.dummy_movq
UpperCamelCase_ = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCamelCase_ = DDIMScheduler(**_lowercase )
UpperCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self , _lowercase , _lowercase=0 )-> Tuple:
UpperCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase )
UpperCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowercase )
# create init_image
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_ = Image.fromarray(np.uinta(_lowercase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowercase ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_lowercase )
else:
UpperCamelCase_ = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
UpperCamelCase_ = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_lowercase )
UpperCamelCase_ = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = pipe(**self.get_dummy_inputs(_lowercase ) )
UpperCamelCase_ = output.images
UpperCamelCase_ = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCamelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase_ = "A red cartoon frog, 4k"
UpperCamelCase_ = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
UpperCamelCase_ = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase_ = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase_ , UpperCamelCase_ = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase_ = pipeline(
image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCamelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 60 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    r"""Constructs a BLIP processor which wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self, images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None,
        stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_token_type_ids: bool = False,
        return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to BertTokenizerFast's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to BertTokenizerFast's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 207 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a PySpark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
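# A minimal usage sketch (assumes an active SparkSession bound to `spark`):
#
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()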
| 207 | 1 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)
    with the deliberately inefficient slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
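    # Demonstration: slowsort works in place and returns None.
    data = [5, 3, 1, 4, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 4, 5]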
| 371 |
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string as base64 bytes."""
    return base64.b64encode(string.encode('utf-8'))


def base64_decode(encoded: bytes) -> str:
    """Decode base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
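    # Round-trip demonstration of the helpers above.
    encoded = base64_encode("Hello World!")
    print(encoded)  # b'SGVsbG8gV29ybGQh'
    print(base64_decode(encoded))  # Hello World!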
| 327 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _a ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = "https://pypi.org/pypi/diffusers/json"
lowerCAmelCase__ = json.loads(request.urlopen(UpperCamelCase_ ).read() )["releases"].keys()
return sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : version.Version(UpperCamelCase_ ) )
def _a ( ) -> int:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(UpperCamelCase_ )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowerCAmelCase__ = Path(UpperCamelCase_ ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( UpperCamelCase_ : Union[str, os.PathLike] ) -> List[str]:
"""simple docstring"""
init_hf_modules()
lowerCAmelCase__ = Path(UpperCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowerCAmelCase__ = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( UpperCamelCase_ : Dict ) -> int:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = f.read()
# Imports of the form `import .xxx`
lowerCAmelCase__ = re.findall("^\s*import\s+\.(\S+)\s*$" , UpperCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , UpperCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(UpperCamelCase_ ) )
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = False
lowerCAmelCase__ = [module_file]
lowerCAmelCase__ = []
# Let's recurse through all relative imports
while not no_change:
lowerCAmelCase__ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(UpperCamelCase_ ) )
lowerCAmelCase__ = Path(UpperCamelCase_ ).parent
lowerCAmelCase__ = [str(module_path / m ) for m in new_imports]
lowerCAmelCase__ = [f for f in new_import_files if f not in all_relative_imports]
lowerCAmelCase__ = [F"{f}.py" for f in new_import_files]
lowerCAmelCase__ = len(UpperCamelCase_ ) == 0
all_relative_imports.extend(UpperCamelCase_ )
return all_relative_imports
def _a ( UpperCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = f.read()
# Imports of the form `import xxx`
lowerCAmelCase__ = re.findall("^\s*import\s+(\S+)\s*$" , UpperCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , UpperCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
lowerCAmelCase__ = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
lowerCAmelCase__ = list(set(UpperCamelCase_ ) )
lowerCAmelCase__ = []
for imp in imports:
try:
importlib.import_module(UpperCamelCase_ )
except ImportError:
missing_packages.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
F"{', '.join(UpperCamelCase_ )}. Run `pip install {' '.join(UpperCamelCase_ )}`" )
return get_relative_imports(UpperCamelCase_ )
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = module_path.replace(os.path.sep , "." )
lowerCAmelCase__ = importlib.import_module(UpperCamelCase_ )
if class_name is None:
return find_pipeline_class(UpperCamelCase_ )
return getattr(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Dict ) -> Any:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowerCAmelCase__ = dict(inspect.getmembers(UpperCamelCase_ , inspect.isclass ) )
lowerCAmelCase__ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , UpperCamelCase_ )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
lowerCAmelCase__ = cls
return pipeline_class
def _a ( UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[Dict[str, str]] = None , UpperCamelCase_ : Optional[Union[bool, str]] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : bool = False , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = str(UpperCamelCase_ )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
lowerCAmelCase__ = module_file_or_url
lowerCAmelCase__ = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
lowerCAmelCase__ = get_diffusers_versions()
# cut ".dev0"
lowerCAmelCase__ = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
lowerCAmelCase__ = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
lowerCAmelCase__ = F"v{revision}"
elif revision == "main":
lowerCAmelCase__ = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
lowerCAmelCase__ = COMMUNITY_PIPELINES_URL.format(revision=UpperCamelCase_ , pipeline=UpperCamelCase_ )
try:
lowerCAmelCase__ = cached_download(
UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , )
lowerCAmelCase__ = "git"
lowerCAmelCase__ = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
lowerCAmelCase__ = hf_hub_download(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , )
lowerCAmelCase__ = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
lowerCAmelCase__ = check_imports(UpperCamelCase_ )
# Now we move the module inside our cached dynamic modules.
lowerCAmelCase__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(UpperCamelCase_ )
lowerCAmelCase__ = Path(UpperCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(UpperCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
lowerCAmelCase__ = F"{module_needed}.py"
shutil.copy(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = use_auth_token
elif use_auth_token is True:
lowerCAmelCase__ = HfFolder.get_token()
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = model_info(UpperCamelCase_ , revision=UpperCamelCase_ , token=UpperCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCAmelCase__ = submodule_path / commit_hash
lowerCAmelCase__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(UpperCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(UpperCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
UpperCamelCase_ , F"{module_needed}.py" , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
return os.path.join(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[Dict[str, str]] = None , UpperCamelCase_ : Optional[Union[bool, str]] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Optional[int] , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = get_cached_module_file(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
return get_class_in_module(UpperCamelCase_ , final_module.replace(".py" , "" ) )
| 340 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price of a stock symbol from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 1 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: outputs 1 when both inputs are equal, otherwise 0.

    Truth table: (0,0)->1, (0,1)->0, (1,0)->0, (1,1)->1.
    """
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1)) | 367 | '''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        ) | 17 | 0 |
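# A minimal instantiation sketch for the config above (field values are
# illustrative, not taken from any particular dataset):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   print(config.d_model)  # 64 by default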
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number, printing why invalid numbers fail."""
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"""{error_message} of its length.""")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"""{error_message} of its first two digits.""")
        return False
    if not luhn_validation(credit_card_number):
        print(f"""{error_message} it fails the Luhn check.""")
        return False
    print(f"""{credit_card_number} is a valid credit card number.""")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
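    # Worked Luhn example: for "79927398713" the doubled-and-folded digits sum
    # to 70, which is divisible by 10, so the number passes the check.
    print(luhn_validation("79927398713"))  # True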
| 32 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline using any `AutoModelForDepthEstimation`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='''bicubic''', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('''uint8''')
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
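# A minimal usage sketch via the pipeline factory (assumes the public
# "Intel/dpt-large" checkpoint):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")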
| 67 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [f"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 172 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 pixel clusters plus one start-of-sequence token
        n_positions=32 * 32,
        n_embd=512, n_layer=24, n_head=8, n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02,
        scale_attn_weights=True, use_cache=True,
        scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
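
# --- Added usage sketch (not part of the original file; object names are assumed) ---
# How the ONNX config above is typically consumed: `inputs` names the dynamic axes,
# and `generate_dummy_inputs` builds a tracing batch from the image processor.
#
# from transformers import ImageGPTImageProcessor
# onnx_config = ImageGPTOnnxConfig(config)
# dummy = onnx_config.generate_dummy_inputs(ImageGPTImageProcessor(), batch_size=2)
# list(dummy.keys())  # -> ["input_ids"]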
| 172 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None, tokenizer_file=None,
        do_lower_case=False, remove_space=True, keep_accents=False,
        bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
        pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 286 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
| 286 | 1 |
def depth_first_search(grid, row, col, visit):
    """Count the simple paths from (row, col) to the bottom-right cell of `grid`,
    moving in the four cardinal directions and treating cells holding 1 as walls."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    import doctest

    doctest.testmod()
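    # --- Added usage example (not part of the original file) ---
    # Count the paths through a 3x3 grid whose centre cell is blocked: the eight
    # open cells form a ring, so there are exactly two simple corner-to-corner paths.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # -> 2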
| 360 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, artists_file, genres_file, lyrics_file,
        version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5,
        unk_token="<|endoftext|>", **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the lyrics vocab had 80 characters; v3 dropped "+", leaving 79.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {
            "artists_encoder": self.artists_encoder,
            "genres_encoder": self.genres_encoder,
            "lyrics_encoder": self.lyrics_encoder,
        }

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Character-level tokenization: the lyrics vocabulary maps single characters."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents (combining marks) from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Lowercases and restricts a name to an accepted charset, joining the rest with underscores."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
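
# --- Added usage sketch (not part of the original file; checkpoint name assumed) ---
# Typical call pattern for the tokenizer above:
#
# tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
# encoding["input_ids"]  # one tensor per prior (one per entry in `version`)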
| 330 | 0 |
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    """Classic fourth-order Runge-Kutta: integrate y' = f(x, y) from xa to x_end
    with step h and initial value y(xa) = ya; returns the array of y values."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
    import doctest

    doctest.testmod()
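    # --- Added usage example (not part of the original file) ---
    # Integrate y' = y from x=0 to x=3 with y(0)=1; the endpoint approximates e**3.
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 3.0, 0.025)
    print(ys[-1])  # ~ 20.0855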
| 122 |
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
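    # --- Added usage examples (not part of the original file) ---
    print(length_conversion(4, "kilometer", "megametre"))  # -> 0.004
    print(length_conversion(1, "meter", "km"))             # -> 0.001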
| 122 | 1 |
def solution(n: int = 2_000_000) -> int:
    """Sum all primes below n using a sieve of Eratosthenes (Project Euler 10)."""
    primality_list = [0 for i in range(n + 1)]  # 0 means "still presumed prime"
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    print(f"{solution() = }")
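    # --- Added sanity check (not part of the original file) ---
    # The primes below 10 are 2, 3, 5 and 7, which sum to 17.
    assert solution(10) == 17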
| 347 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4, num_channels=3, embed_dim=64,
        depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7,
        mlp_ratio=3.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None, out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
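
# --- Added note (not part of the original file) ---
# hidden_size above doubles embed_dim once per extra stage: with the defaults
# embed_dim=64 and depths of length 4, hidden_size = 64 * 2 ** 3 = 512.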
| 347 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
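
# --- Added sketch (not part of the original file) ---
# The criteria above are all length/time based; a custom one only needs a __call__
# returning a bool. A minimal sketch that stops once a chosen token id is generated
# (assumes the StoppingCriteria base class exported by transformers.generation):
#
# from transformers.generation import StoppingCriteria
#
# class StopOnToken(StoppingCriteria):
#     def __init__(self, stop_token_id):
#         self.stop_token_id = stop_token_id
#
#     def __call__(self, input_ids, scores, **kwargs):
#         # stop when every sequence in the batch ends with the stop token
#         return bool((input_ids[:, -1] == self.stop_token_id).all())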
| 207 |
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours among `taken` balls
    drawn from an urn holding 10 balls of each of 7 colours."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
    print(solution(20))
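    # --- Added derivation check (not part of the original file) ---
    # By linearity of expectation: E[#colours] = 7 * (1 - C(60, 20) / C(70, 20)).
    assert abs(float(solution()) - 7 * (1 - math.comb(60, 20) / math.comb(70, 20))) < 1e-9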
| 246 | 0 |
"""simple docstring"""
import math
def res(x, y):
    """Compare huge powers via logarithms: log10(x**y) = y * log10(x)."""
    if 0 not in (x, y):
        return y * math.log10(x)
    if x == 0:  # 0 raised to any number is 0
        return 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 154 |
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` for `iterations` steps and return the output string."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
    import doctest

    doctest.testmod()
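    # --- Added usage example (not part of the original file) ---
    print(fizz_buzz(1, 15))
    # -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "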
| 154 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 40 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14, seq_length=7,
        is_training=True, use_input_mask=True, use_labels=True,
        vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37,
        activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1,
        max_position_embeddings=512, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "head_mask": head_mask}
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
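
# --- Added note (not part of the original file) ---
# Left padding matters for decoder-only models: with right padding, the last position
# of a short sequence (the one generation continues from) would be a pad token.
# Minimal sketch of the pattern used above (tokenizer/model objects assumed):
#
# tokenizer.padding_side = "left"
# batch = tokenizer(sentences, return_tensors="tf", padding=True)
# model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])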
| 17 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels=3, image_size=600,
        width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean",
        initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99,
        dropout_rate=0.5, drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
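
# --- Added sketch (not part of the original file) ---
# How the width coefficient and depth divisor above are typically applied
# (EfficientNet-style channel rounding; a simplified sketch, not the transformers
# implementation):
def round_filters(filters: int, width_coefficient: float = 2.0, depth_divisor: int = 8) -> int:
    filters *= width_coefficient
    new_filters = max(depth_divisor, int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_filters < 0.9 * filters:  # never round down by more than 10%
        new_filters += depth_divisor
    return int(new_filters)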
| 367 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExamplesTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 60 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : int , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , lowercase_ : Optional[torch.FloatTensor] = None , **lowercase_ : Optional[int] , ):
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = 1
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = len(lowercase_ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_ )}." )
# get prompt text embeddings
snake_case_ = self.tokenizer(
lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
snake_case_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
snake_case_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
snake_case_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case_ ,snake_case_ ,snake_case_ = text_embeddings.shape
snake_case_ = text_embeddings.repeat(1 , lowercase_ , 1 )
snake_case_ = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ = 42
if negative_prompt is None:
snake_case_ = ['''''']
elif type(lowercase_ ) is not type(lowercase_ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_ )} !="
F" {type(lowercase_ )}." )
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = [negative_prompt]
elif batch_size != len(lowercase_ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
snake_case_ = negative_prompt
snake_case_ = text_input_ids.shape[-1]
snake_case_ = self.tokenizer(
lowercase_ , padding='''max_length''' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' , )
snake_case_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = uncond_embeddings.shape[1]
snake_case_ = uncond_embeddings.repeat(lowercase_ , lowercase_ , 1 )
snake_case_ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
snake_case_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case_ = torch.randn(
lowercase_ , generator=lowercase_ , device='''cpu''' , dtype=lowercase_ ).to(self.device )
snake_case_ = torch.randn(lowercase_ , generator=lowercase_ , device='''cpu''' , dtype=lowercase_ ).to(
self.device )
else:
snake_case_ = torch.randn(
lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
snake_case_ = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
snake_case_ = latents_reference.to(self.device )
snake_case_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
snake_case_ = (latents_shape[3] - latents_shape_reference[3]) // 2
snake_case_ = (latents_shape[2] - latents_shape_reference[2]) // 2
snake_case_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
snake_case_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
snake_case_ = 0 if dx < 0 else dx
snake_case_ = 0 if dy < 0 else dy
snake_case_ = max(-dx , 0 )
snake_case_ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
snake_case_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowercase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ = {}
if accepts_eta:
snake_case_ = eta
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
snake_case_ = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case_ ,snake_case_ = noise_pred.chunk(2 )
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_ )
snake_case_ = 1 / 0.1_8215 * latents
snake_case_ = self.vae.decode(lowercase_ ).sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
snake_case_ = self.feature_extractor(self.numpy_to_pil(lowercase_ ) , return_tensors='''pt''' ).to(
self.device )
snake_case_ ,snake_case_ = self.safety_checker(
images=lowercase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
snake_case_ = None
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
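
# --- Added illustration (not part of the original file) ---
# The classifier-free guidance step above in isolation: both branches run in one
# batch, then eps = eps_uncond + guidance_scale * (eps_text - eps_uncond).
# A tiny self-contained check with dummy tensors:
#
# import torch
# eps_uncond, eps_text = torch.zeros(4), torch.ones(4)
# eps = eps_uncond + 7.5 * (eps_text - eps_uncond)  # -> tensor of 7.5s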
| 56 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1 ).write()
snake_case_ = iter_sql_file(__UpperCAmelCase )
snake_case_ = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2 ).write()
snake_case_ = iter_sql_file(__UpperCAmelCase )
snake_case_ = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
with pytest.raises(__UpperCAmelCase ):
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0 ).write()
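
# A minimal round-trip sketch of the SQL I/O exercised above (table name and
# connection URI are hypothetical):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   ds.to_sql("dataset", "sqlite:///my.db")               # backed by SqlDatasetWriter
#   ds2 = Dataset.from_sql("dataset", "sqlite:///my.db")  # backed by SqlDatasetReader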
| 56 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
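
# The pipeline embeds the audio with CLAP's audio encoder and each candidate label with
# its text encoder, then softmaxes the similarities over the labels. A hypothetical call:
#   pipe = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   pipe(audio_array, candidate_labels=["Sound of a dog", "Sound of rain"])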
| 371 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that can form between 1 and n_limit
    distinct hollow square laminae (Project Euler 174)."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
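
# Worked example: a 3x3 outer square with a 1x1 hole uses 3*3 - 1*1 = 8 tiles, so
# t = 8 gains one lamina from the pair (outer_width=3, hole_width=1).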
if __name__ == "__main__":
print(F'{solution() = }')
| 145 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
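
# _LazyModule defers the heavy torch/vision imports until an attribute is first accessed,
# so e.g. the line below only triggers the real configuration import on use (a sketch):
#   from transformers.models.pix2struct import Pix2StructConfig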
| 10 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should yield the expected number of batches.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
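
# A minimal sketch of the sharding behaviour checked above, assuming two processes:
#   sampler = BatchSampler(range(12), batch_size=3, drop_last=False)
#   shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
#   list(shards[0])  # [[0, 1, 2], [6, 7, 8]]
#   list(shards[1])  # [[3, 4, 5], [9, 10, 11]]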
| 330 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.', )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.', )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the fourth channel is dropped by the RGB conversion)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
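
# Each flattened patch stores patch_h * patch_w * channels pixel values plus two extra
# slots for the patch's row and column index, hence 16 * 16 * 3 + 2 = 770 for the
# default tester configuration.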
| 360 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow's C++ logging noise
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
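
# Typical invocation from a repository checkout (path hypothetical):
#   python utils/print_env.py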
| 53 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
lowerCAmelCase : Optional[int] =logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
lowerCAmelCase : Dict =get_args()
main(args)
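
# Example invocation (paths and checkpoint are hypothetical):
#   python eval_rag.py --model_name_or_path facebook/rag-token-nq --model_type rag_token \
#       --evaluation_set path/to/test.source --gold_data_path path/to/gold_data \
#       --predictions_path preds.txt --eval_mode e2e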
| 223 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
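
# Worked example: for n = 10 the sum of squares is 385 and the square of the sum is
# 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.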
if __name__ == "__main__":
print(F'''{solution() = }''')
| 223 | 1 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string using Base85."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode Base85 bytes back to a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
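
# Round-trip property: base85_decode(base85_encode(s)) == s for any UTF-8 string s.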
| 35 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
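
# e.g. rename_key(state_dict, "backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight")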
def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection of the Swin backbone into separate query/key/value tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
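# Illustrative sketch (not from the original script): how a fused qkv matrix of shape
# (3 * dim, dim) is sliced into the query/key/value blocks used by read_in_q_k_v above.
def _demo_qkv_split(dim=4):
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv)
    return query, key, value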
def correct_unfold_reduction_order(x):
    """Reorder a 2D patch-merging reduction weight from group-major to interleaved layout."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    """Inverse of correct_unfold_reduction_order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Same reordering as correct_unfold_reduction_order for a 1D patch-merging norm parameter."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    """Inverse of correct_unfold_norm_order."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
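# Illustrative sanity check (not part of the original script): each reverse_* helper undoes
# the matching correct_* reordering, so a round trip is the identity.
def _demo_unfold_roundtrip():
    weight = torch.randn(8, 16)
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(weight)), weight)
    norm = torch.randn(16)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(norm)), norm)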
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Load the mmsegmentation checkpoint, remap it to the HF UperNet layout, verify, and optionally save/push."""
    model_name_to_url = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', file_name=model_name)[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print('First values of logits:', logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub')
        model.push_to_hub(f'openmmlab/{model_name}')
        processor.push_to_hub(f'openmmlab/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class for a Transformer-XL model."""
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
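        # Example (illustrative, not from the original config): with the default cutoffs
        # [20000, 40000, 200000] and vocab_size=267735, the adaptive softmax splits the
        # vocabulary into four clusters -- ids [0, 20000), [20000, 40000), [40000, 200000)
        # and [200000, 267735) -- and `tie_projs` holds one flag per cluster, with the head
        # cluster (index 0) never sharing its projection.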
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 78 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_x_clip'''] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
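# Illustrative note (not part of the module): with the lazy setup below, a statement like
# `from transformers.models.x_clip import XCLIPModel` triggers the import of the heavy
# modeling file only on first attribute access, not at package-import time.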
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard encoder-decoder input dict, deriving masks from the pad token."""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
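# Illustrative sketch (not part of the original tests): the attention masks above are just
# "not the pad token", e.g. with pad_token_id=1 the padded tail becomes zeros.
def _demo_attention_mask():
    input_ids = torch.tensor([[5, 7, 9, 1, 1]])
    return input_ids.ne(1).long()  # tensor([[1, 1, 1, 0, 0]])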
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': MaMaaaForConditionalGeneration,
            'feature-extraction': MaMaaaModel,
            'summarization': MaMaaaForConditionalGeneration,
            'text2text-generation': MaMaaaForConditionalGeneration,
            'translation': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info['missing_keys'], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs['input_ids']
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs['input_ids']
                decoder_input_ids = inputs.get('decoder_input_ids', encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop('decoder_input_ids', None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs['inputs_embeds'] = wte(input_ids)
            else:
                inputs['inputs_embeds'] = wte(encoder_input_ids)
                inputs['decoder_inputs_embeds'] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M')
    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained('facebook/m2m100_418M').to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M', src_lang='fr', tgt_lang='en')
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors='pt')
        hypotheses_batch = model.generate(
            input_ids=dct['input_ids'].to(torch_device),
            attention_mask=dct['attention_mask'].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id('en'),
        )
        expected_en = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 185 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
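        # Illustrative shape note (not from the original tests): `tf.expand_dims(x, 1)` turns a
        # (batch_size, seq_length) tensor into (batch_size, 1, seq_length) and the tile above
        # repeats it num_choices times, so every choice shares the same flat inputs.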
        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 33 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
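# Illustrative usage sketch (not from the accelerate source; `optimizer`, `scheduler` and
# `dataloader` are assumed to exist): the wrapper below only advances the learning-rate
# schedule on iterations where the optimizer really stepped, which is what makes it safe
# under gradient accumulation.
#
#   scheduler = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=True)
#   for batch in dataloader:
#       ...
#       optimizer.step()
#       scheduler.step()  # no-op on accumulation-only iterations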
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr(self):
        return self.scheduler.get_last_lr()
    def state_dict(self):
        return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
        return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 163 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
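    # Illustrative note (not from the original tests): `size={"shortest_edge": 20}` rescales
    # the shorter image side to 20 pixels and `crop_size={"height": 18, "width": 18}` then
    # takes a center crop, so every pixel_values tensor checked below ends up with spatial
    # shape (18, 18) regardless of the random input resolution.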
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 106 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the (possibly nested) attribute of `hf_pointer` named by `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
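# Illustrative sketch (not from the original script): set_recursively walks dotted attribute
# paths the same way a read-only lookup would; the hypothetical helper below shows the
# traversal on its own.
def _demo_get_nested(module, dotted_name):
    obj = module
    for attribute in dotted_name.split("."):
        obj = getattr(obj, attribute)
    return obj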
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy all fairseq wav2vec2 weights into the HF model, returning the encoder->decoder projection."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route a fairseq conv-layer parameter to the matching HF feature-extractor module."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weight is tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
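# Illustrative check (not part of the original script; shapes only): the layer returned by
# make_linear_from_emb reuses the embedding matrix, so it maps hidden vectors of size
# emb_size to vocab_size scores.
def _demo_emb_to_linear(vocab_size=6, emb_size=4):
    emb = nn.Embedding(vocab_size, emb_size)
    lin = make_linear_from_emb(emb)
    assert lin.weight.shape == (vocab_size, emb_size)
    assert lin(torch.randn(1, emb_size)).shape == (1, vocab_size)
    return lin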
def create_vocab_dict(dict_path):
    """Read a fairseq dict file and prepend the four special tokens to build the vocab."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
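# Illustrative sketch (not from the original script): create_vocab_dict prepends the four
# special tokens, so the first real word of the fairseq dict file gets id 4.
def _demo_vocab_dict():
    words = ["hello", "world"]
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    vocab.update(dict(zip(words, range(4, len(words) + 4))))
    assert vocab["hello"] == 4 and vocab["world"] == 5
    return vocab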
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2 + speech-to-text-2 seq2seq checkpoint into a HF SpeechEncoderDecoderModel."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 106 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of elements in patience sorting, ordered by its top (last) element."""
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sorting and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
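# Illustrative doctest-style check (my addition, assuming the module above):
#
#   >>> patience_sort([24, 19, 9, 14, 8, 54])
#   [8, 9, 14, 19, 24, 54]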
| 259 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
__snake_case = """__DUMMY_TRANSFORMERS_USER__"""
__snake_case = """Dummy User"""
__snake_case = """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"""
__snake_case = """https://hub-ci.huggingface.co"""
__snake_case = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
__snake_case = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
__snake_case = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
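# Illustrative sketch (my addition) of how a test could lean on `temporary_repo`
# so the CI repo is deleted even if the test body raises; names are hypothetical:
#
#   def test_upload_to_temporary_repo(temporary_repo, hf_api, hf_token):
#       with temporary_repo(f"{CI_HUB_USER}/tmp_repo-{int(time.time() * 10e3)}") as repo_id:
#           hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")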
| 259 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
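# Rough illustration (my gloss, not part of the file): the lazy module keeps the
# package import cheap; torch-backed symbols are only resolved on first access:
#
#   from transformers.models.swinv2 import Swinv2Config  # config only, no modeling import
#   from transformers.models.swinv2 import Swinv2Model   # materializes modeling_swinv2 lazily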
| 100 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
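# Hypothetical usage sketch (assumes torch + Pillow and that the VQA checkpoint
# can be downloaded; the image path is a placeholder):
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(image=Image.open("two_cats.png"), question="How many cats are sleeping?"))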
| 100 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
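# Hypothetical invocation of this test module (the file path is assumed):
#
#   python -m pytest tests/models/mobilebert/test_tokenization_mobilebert.py -q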
| 106 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP-style image processor (the CLIP family is implied by the OPENAI_CLIP defaults)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
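# Minimal usage sketch (my illustration; the image path is a placeholder):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.open("example.png"), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)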
| 106 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
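# Hedged sketch of how these tests are typically driven (paths assumed):
#
#   python -m pytest tests/test_metrics.py -q
#   # the multi-GPU case itself shells out to:
#   torchrun --nproc_per_node=<n_gpus> .../test_utils/scripts/external_deps/test_metrics.py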
| 273 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
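# Hedged usage sketch: a hypothetical invocation of the converter (all paths
# are placeholders, not files shipped with the script):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 --no-push_to_hub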
| 35 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any `GenerationConfig` entry by its dictionary representation so the
        result can be JSON-serialized.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
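# Minimal usage sketch (my illustration; `output_dir` is a placeholder):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )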
| 35 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
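# Minimal usage sketch (my illustration, not part of the original module):
#
#   config = DebertaV2Config()  # defaults mirror microsoft/deberta-v2-xlarge
#   onnx_config = DebertaV2OnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)   # OrderedDict mapping input names to dynamic axes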
| 178 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 178 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
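# Minimal usage sketch (my illustration; assumes the `laion/clap-htsat-unfused`
# checkpoint is reachable):
#
#   import numpy as np
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["dog barking"], audios=[np.zeros(48_000)], sampling_rate=48_000, return_tensors="pt")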
| 87 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    "Decorator marking a test that must run only on the CPU"
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    "Decorator marking a test that requires CUDA"
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    "Decorator marking a test that requires an XPU"
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    "Decorator marking a test that requires the `mps` backend"
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    "Decorator marking a test that requires transformers and datasets"
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    "Decorator marking a test that requires bitsandbytes"
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    "Decorator marking a test that requires a TPU"
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    "Decorator marking a test that requires exactly one GPU"
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    "Decorator marking a test that requires exactly one XPU"
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    "Decorator marking a test that requires more than one GPU"
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    "Decorator marking a test that requires more than one XPU"
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    "Decorator marking a test that requires safetensors"
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    "Decorator marking a test that requires DeepSpeed"
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    "Decorator marking a test that requires torch>=1.12.0 (needed for FSDP)"
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    "Decorator marking a test that requires at least the given torch version"
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    "Decorator marking a test that requires tensorboard"
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    "Decorator marking a test that requires wandb"
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    "Decorator marking a test that requires comet_ml"
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    "Decorator marking a test that requires at least one tracker (and no comet_ml)"
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single shared temporary directory for the lifetime of the class and, if
    `clear_on_setup` is True, wipes its contents before each test.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.mkdtemp()` directory shared across the tests of the class."
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes `cls.tmpdir` after the last test of the class."
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself."
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    "A TestCase class that resets the accelerator state between tests so state is not carried over."

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    "A TestCase class that registers mocks to be started on setup and stopped automatically on teardown."

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    "Gathers `tensor` across all processes and checks that every rank holds the same value."
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
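# Usage sketch (illustrative only, assumes an initialized distributed state):
# `are_the_same_tensors` gathers one tensor from every process and verifies all ranks
# hold the same value, e.g. inside a distributed test:
#
#     assert are_the_same_tensors(torch.ones(2))  # identical on every rank
#     rank_tensor = torch.tensor([AcceleratorState().process_index], dtype=torch.float32)
#     assert not are_the_same_tensors(rank_tensor)  # differs per rank (with >1 process)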
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns the decoded stdout. Raises a
    `SubprocessCallException` with the captured output if an error occurs while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
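# Illustrative note (not part of the original file): thanks to `_LazyModule`, importing
# this package is cheap; the heavy framework-specific submodules are only imported when
# one of their attributes is first accessed, e.g.:
#
#     from transformers.models.mt5 import MT5Config                     # config only
#     from transformers.models.mt5 import MT5ForConditionalGeneration   # now loads the torch code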
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet. Forwards the same inputs through every wrapped
    ControlNet and sums the resulting residuals.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None, ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")

        return cls(controlnets)
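# Usage sketch (illustrative only; the checkpoint names are examples and trigger downloads):
#
#     controlnets = [
#         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose"),
#         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#     ]
#     multi = MultiControlNetModel(controlnets)
#     multi.save_pretrained("./multi-controlnet")  # writes ./multi-controlnet and ./multi-controlnet_1
#     multi = MultiControlNetModel.from_pretrained("./multi-controlnet")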
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
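# Worked example (a minimal sketch, not part of the original module): these aliases let a
# signature accept a bare value, a list of values, or a dict of values uniformly.
if __name__ == "__main__":

    def count_items(data: NestedDataStructureLike[int]) -> int:
        # A dict, list, or tuple counts its entries; a bare value counts as one item.
        if isinstance(data, (list, tuple, dict)):
            return len(data)
        return 1

    assert count_items(3) == 1
    assert count_items([1, 2, 3]) == 3
    assert count_items({"a": 1, "b": 2}) == 2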
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
            model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True, )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
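# Worked example (illustrative): the renaming maps original Donut/Swin parameter names
# onto the HuggingFace layout, e.g.:
#
#     rename_key("encoder.model.layers.0.blocks.1.attn.proj.weight")
#     # -> "encoder.encoder.layers.0.blocks.1.attention.output.dense.weight"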
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
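# Usage sketch (illustrative, not part of the original module): a tiny read loop that
# distinguishes arrow keys from printable characters.
#
#     while True:
#         key = get_character()
#         if not isinstance(key, str):         # KEYMAP["undefined"]
#             continue
#         if ord(key) == KEYMAP["interrupt"]:  # Ctrl-C
#             break
#         if ord(key) == KEYMAP["up"]:
#             print("arrow up")
#         else:
#             print("key:", key)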
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero shot audio classification pipeline. This pipeline predicts the class of an audio when you provide an audio
    and a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """
        Assign labels to the audio(s) passed as inputs.
        """
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
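# Usage sketch (illustrative; the model name is an example and triggers a download):
#
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     output = classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # e.g. [{"score": 0.99, "label": "Sound of a dog"}, {"score": 0.01, "label": "Sound of vacuum cleaner"}]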
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase__ = F"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowerCAmelCase__ = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ) + 1, 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase_ )) )
self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ) + 1, 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box( box , width , height ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
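# Worked example (illustrative numbers): in a 200x100 image the pixel box
# (50, 20, 150, 80) lands on the resolution-independent 0-1000 grid that
# LayoutLM-style models expect.
assert normalize_box([50, 20, 150, 80] , 200 , 100 ) == [250, 200, 750, 800]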
def apply_tesseract( image , lang , tesseract_config ):
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a__ ( BaseImageProcessor ):
'''simple docstring'''
lowercase__ : Any = ["pixel_values"]
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_55 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = "" , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_value
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase__ = apply_ocr
lowerCAmelCase__ = ocr_lang
lowerCAmelCase__ = tesseract_config
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCAmelCase__ = (size['''height'''], size['''width'''])
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase__ = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(lowerCamelCase_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for image in images:
lowerCAmelCase__ , lowerCAmelCase__ = apply_tesseract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
words_batch.append(lowerCamelCase_ )
boxes_batch.append(lowerCamelCase_ )
if do_resize:
lowerCAmelCase__ = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCamelCase_ )
if apply_ocr:
lowerCAmelCase__ = words_batch
lowerCAmelCase__ = boxes_batch
        return data
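# Hedged usage sketch (assumes this file is the LayoutLMv2-style image
# processor from transformers and that the tesseract binary is installed;
# the names below are illustrative, nothing here is defined in this file):
# processor = LayoutLMv2ImageProcessor(apply_ocr=True)
# encoding = processor(PIL.Image.open("document.png"), return_tensors="pt")
# -> encoding["pixel_values"] has shape (1, 3, 224, 224), and "words" /
#    "boxes" (on the 0-1000 grid) are attached because apply_ocr is True.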
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    """simple docstring"""
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
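# Sanity check of the vectorized identity used above,
# ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2 (illustrative points):
# for (0, 0) and (3, 4) the cross distance is 3**2 + 4**2 = 25.
assert squared_euclidean_distance(
    np.array([[0.0, 0.0], [3.0, 4.0]] ) , np.array([[0.0, 0.0], [3.0, 4.0]] )
).tolist() == [[0.0, 25.0], [25.0, 0.0]]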
def color_quantize( x , clusters ):
    """simple docstring"""
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
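# Worked example (illustrative black/white palette): each RGB pixel maps to
# the index of its nearest cluster, so a dark pixel -> 0 and a bright one -> 1.
assert color_quantize(
    np.array([[[10, 10, 10], [250, 250, 250]]] ) , np.array([[0, 0, 0], [255, 255, 255]] )
).tolist() == [0, 1]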
class a__ ( BaseImageProcessor ):
A = ['pixel_values']
def __init__( self : Any,_A : Optional[Union[List[List[int]], np.ndarray]] = None,_A : bool = True,_A : Dict[str, int] = None,_A : PILImageResampling = PILImageResampling.BILINEAR,_A : bool = True,_A : bool = True,**_A : int,):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Any = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_A )
SCREAMING_SNAKE_CASE_ : str = np.array(_A ) if clusters is not None else None
SCREAMING_SNAKE_CASE_ : int = do_resize
SCREAMING_SNAKE_CASE_ : List[str] = size
SCREAMING_SNAKE_CASE_ : Dict = resample
SCREAMING_SNAKE_CASE_ : List[str] = do_normalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_color_quantize
def __UpperCamelCase ( self : Any,_A : np.ndarray,_A : Dict[str, int],_A : PILImageResampling = PILImageResampling.BILINEAR,_A : Optional[Union[str, ChannelDimension]] = None,**_A : Union[str, Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
_A,size=(size["height"], size["width"]),resample=_A,data_format=_A,**_A )
def __UpperCamelCase ( self : str,_A : np.ndarray,_A : Optional[Union[str, ChannelDimension]] = None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = rescale(image=_A,scale=1 / 127.5,data_format=_A )
SCREAMING_SNAKE_CASE_ : Dict = image - 1
return image
def __UpperCamelCase ( self : Optional[Any],_A : ImageInput,_A : bool = None,_A : Dict[str, int] = None,_A : PILImageResampling = None,_A : bool = None,_A : Optional[bool] = None,_A : Optional[Union[List[List[int]], np.ndarray]] = None,_A : Optional[Union[str, TensorType]] = None,_A : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,**_A : List[str],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : str = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(_A )
SCREAMING_SNAKE_CASE_ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : int = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE_ : int = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(_A )
SCREAMING_SNAKE_CASE_ : Any = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : List[Any] = [self.resize(image=_A,size=_A,resample=_A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : int = [self.normalize(image=_A ) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE_ : List[Any] = [to_channel_dimension_format(_A,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE_ : Dict = np.array(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = color_quantize(_A,_A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE_ : Optional[int] = images.shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = images.reshape(_A,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE_ : str = list(_A )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(_A,_A ) for image in images]
SCREAMING_SNAKE_CASE_ : List[Any] = {"input_ids": images}
return BatchFeature(data=_A,tensor_type=_A )
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( SequenceFeatureExtractor ):
snake_case_ = ["""input_features""", """attention_mask"""]
def __init__( self : Tuple , __lowercase : Optional[int]=80 , __lowercase : Optional[int]=1_60_00 , __lowercase : str=80 , __lowercase : Dict=0.0 , __lowercase : int=True , __lowercase : List[Any]=True , __lowercase : List[Any]=True , **__lowercase : int , ) -> int:
super().__init__(feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[Any] =do_ceptral_normalize
SCREAMING_SNAKE_CASE__ : List[str] =normalize_means
SCREAMING_SNAKE_CASE__ : Optional[int] =normalize_vars
SCREAMING_SNAKE_CASE__ : Tuple =True
    def _extract_fbank_features( self , waveform : np.ndarray , ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def utterance_cmvn( x : np.ndarray , input_length : int , normalize_means : Optional[bool] = True , normalize_vars : Optional[bool] = True , padding_value : float = 0.0 , ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
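    # Worked sketch of the CMVN step above (illustrative numbers): a two-frame,
    # one-dimensional feature [[1.0], [3.0]] with input_length=2 has mean 2.0 and
    # population std 1.0, so utterance_cmvn maps it to [[-1.0], [1.0]].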
    def normalize( self , input_features : List[np.ndarray] , attention_mask : Optional[np.ndarray] = None ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(input_features , lengths )
        ]
def __call__( self : int , __lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowercase : Union[bool, str, PaddingStrategy] = False , __lowercase : Optional[int] = None , __lowercase : bool = False , __lowercase : Optional[int] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , **__lowercase : str , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE__ : List[str] =isinstance(__lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE__ : str =is_batched_numpy or (
isinstance(__lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            SCREAMING_SNAKE_CASE__ : Optional[Any] =[np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            SCREAMING_SNAKE_CASE__ : int =np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            SCREAMING_SNAKE_CASE__ : Tuple =raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ : List[str] =[raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE__ : Tuple =[self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ : Optional[int] =BatchFeature({'''input_features''': features} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.pad(
__lowercase , padding=__lowercase , max_length=__lowercase , truncation=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ : Optional[Any] =padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __lowercase ):
            SCREAMING_SNAKE_CASE__ : Any =[np.asarray(feature , dtype=np.float32 ) for feature in input_features]
SCREAMING_SNAKE_CASE__ : Dict =padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
            SCREAMING_SNAKE_CASE__ : str =[np.asarray(array , dtype=np.int32 ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE__ : str =(
                np.array(__lowercase , dtype=np.int32 )
if self._get_padding_strategies(__lowercase , max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE__ : int =self.normalize(
padded_inputs['''input_features'''] , attention_mask=__lowercase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ : Dict =padded_inputs.convert_to_tensors(__lowercase )
        return padded_inputs
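# Hedged usage sketch (assumes this file is the Speech2Text feature extractor
# from transformers; names and shapes below are illustrative):
# extractor = Speech2TextFeatureExtractor()
# inputs = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
# -> inputs.input_features has shape (1, num_frames, 80) after the CMVN above.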
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
UpperCAmelCase_ : Tuple = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class UpperCamelCase ( PreTrainedTokenizer ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<sep>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<cls>" , UpperCAmelCase__="<mask>" , UpperCAmelCase__=["<eop>", "<eod>"] , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
A__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
A__ = 3
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
A__ = jieba
A__ = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __A ( self ):
return len(self.sp_model )
def __A ( self ):
A__ = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self , UpperCAmelCase__ ):
A__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , UpperCAmelCase__ ):
if self.remove_space:
A__ = " ".join(inputs.strip().split() )
else:
A__ = inputs
A__ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
A__ = unicodedata.normalize("NFKD" , UpperCAmelCase__ )
A__ = "".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
A__ = outputs.lower()
return outputs
def __A ( self , UpperCAmelCase__ ):
A__ = self.preprocess_text(UpperCAmelCase__ )
A__ = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
A__ = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ = cur_pieces[1:]
else:
A__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def __A ( self , UpperCAmelCase__ ):
return self.sp_model.PieceToId(UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ ):
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ ):
A__ = "".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , " " ).strip()
return out_string
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = [self.sep_token_id]
A__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , "wb" ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def __A ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
A__ = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
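    # Worked example of the whitespace round-trip above (illustrative string):
    # "a b\nc".translate(str.maketrans(" \n", "\u2582\u2583")) gives
    # "a\u2582b\u2583c" before SentencePiece, and `_decode` above maps
    # "\u2582"/"\u2583" back to " "/"\n".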
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def convert_state_dict( state_dict ):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
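# Worked example of the renaming above (illustrative key): the att->attention
# regex, the time_mix_k->time_mix_key rename and the final "rwkv." prefix turn
# "blocks.2.att.time_mix_k" into "rwkv.blocks.2.attention.time_mix_key".
assert convert_state_dict({"blocks.2.att.time_mix_k": 0} ) == {"rwkv.blocks.2.attention.time_mix_key": 0}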
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
"""simple docstring"""
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
A__ = 50277
A__ = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
A__ = PreTrainedTokenizerFast(tokenizer_file=_A )
A__ = len(_A )
tokenizer.save_pretrained(_A )
# 2. Build the config
A__ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A__ = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
A__ = RwkvConfig(
vocab_size=_A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_A )
# 3. Download model file then convert state_dict
A__ = hf_hub_download(_A , _A )
A__ = torch.load(_A , map_location="cpu" )
A__ = convert_state_dict(_A )
# 4. Split in shards and save
A__ , A__ = shard_checkpoint(_A )
for shard_file, shard in shards.items():
torch.save(_A , os.path.join(_A , _A ) )
if index is not None:
A__ = os.path.join(_A , _A )
# Save the index as well
with open(_A , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model." )
A__ = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A__ = torch.load(os.path.join(_A , _A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_A , _A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
A__ = AutoModelForCausalLM.from_pretrained(_A )
model.push_to_hub(_A , max_shard_size="2GB" )
tokenizer.push_to_hub(_A )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.3_5_5_8_1_8,
}
def a ( from_type : str , to_type : str , value : float ) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
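# Worked example (values follow directly from the table above; the converter
# is the function named `a` here): 2 kilowatthours are
# 2 * 3_600_000 / 1 = 7_200_000 joules.
assert a("kilowatthour" , "joule" , 2 ) == 7_200_000.0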
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
def encode( plain : str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded : list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ) -> None:
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
if __name__ == "__main__":
main()
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase_ :
@staticmethod
def snake_case_ ( *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
pass
def hashimage( image : Image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase__ : str = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase : List[str] = DepthEstimationPipeline(model=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, SCREAMING_SNAKE_CASE_ )
import datasets
UpperCamelCase : int = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test' )
UpperCamelCase : Tuple = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
], SCREAMING_SNAKE_CASE_, )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def snake_case_ ( self ) -> Optional[int]:
pass
@slow
@require_torch
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Optional[Any] = 'Intel/dpt-large'
UpperCamelCase : Tuple = pipeline('depth-estimation', model=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
UpperCamelCase : int = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ), 29.3_04 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ), 2.6_62 )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=3_0 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=None , snake_case=2 , ):
'''simple docstring'''
UpperCAmelCase : str = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : str = image_size
UpperCAmelCase : Tuple = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : str = use_labels
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : int = type_sequence_label_size
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Dict = scope
UpperCAmelCase : Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase : Dict = num_patches + 1
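        # e.g. with the defaults above (image_size=30, patch_size=2) this gives
        # (30 // 2) ** 2 = 225 patches, i.e. seq_length == 226 including [CLS].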
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = ViTForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase : Dict = 1
UpperCAmelCase : List[str] = ViTForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.type_sequence_label_size
UpperCAmelCase : List[str] = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : List[str] = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = ViTModelTester(self )
UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowerCAmelCase__ )
UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(**lowerCAmelCase__ )
# verify the logits
UpperCAmelCase : Any = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
UpperCAmelCase : List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCAmelCase__ )
UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_8_0 )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase : int = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__ )
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase : str = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase : str = model(lowerCAmelCase__ )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowercase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[str] = ShapEPipeline
_UpperCAmelCase : Tuple = ['''prompt''']
_UpperCAmelCase : Dict = ['''prompt''']
_UpperCAmelCase : Any = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Optional[int] = False
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return 8
@property
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE_: Any = PriorTransformer(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_: Optional[int] = ShapERenderer(**lowerCAmelCase__)
return model
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.dummy_prior
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: List[str] = self.dummy_renderer
SCREAMING_SNAKE_CASE_: Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_: Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=0):
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = "cpu"
SCREAMING_SNAKE_CASE_: Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Dict = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE_: Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Dict = torch_device == "cpu"
SCREAMING_SNAKE_CASE_: List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 1
SCREAMING_SNAKE_CASE_: Any = 2
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_inputs(lowerCAmelCase__)
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE_: List[Any] = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE_: Tuple = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy")
SCREAMING_SNAKE_CASE_: List[str] = ShapEPipeline.from_pretrained("openai/shap-e")
SCREAMING_SNAKE_CASE_: Optional[int] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
SCREAMING_SNAKE_CASE_: int = pipe(
"a shark" , generator=lowerCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
__lowerCamelCase : List[str] = GPTSanJapaneseTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[int] = {"""do_clean_text""": False, """add_prefix_space""": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text)
        output = tokenizer.decode(tokens)
        self.assertEqual(output, expected_text)
@slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_1 = tokenizer.decode(tokens_1)
        output_2 = tokenizer.decode(tokens_2)
        output_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_1, expected_text)
        self.assertEqual(output_2, expected_text)
        self.assertEqual(output_3, expected_text)
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
@slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        x_token_1 = tokenizer.encode('あンいワ')
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ')
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン')
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_input_ids = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_input_ids)
        self.assertListEqual(x_token.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token.attention_mask, expected_attention_mask)
        self.assertListEqual(x_token_2.input_ids, expected_input_ids)
        self.assertListEqual(x_token_2.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token_2.attention_mask, expected_attention_mask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 216 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
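# A quick illustration of the pattern above (a sketch): the initial import stays
# cheap, and the torch/TF/Flax modules are only loaded when an attribute is
# first resolved by the _LazyModule.
#
#   import transformers.models.encoder_decoder as encoder_decoder
#   encoder_decoder.EncoderDecoderConfig  # lightweight, configuration only
#   encoder_decoder.EncoderDecoderModel   # triggers the torch-backed import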
| 216 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowercase_ = ["""small""", """medium""", """large"""]
lowercase_ = """lm_head.decoder.weight"""
lowercase_ = """lm_head.weight"""
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) ->Any:
_SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
lowercase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowercase_ = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
lowercase_ = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
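    # A quick sanity check after conversion (a sketch; assumes the "small"
    # checkpoint was converted by the loop above):
    #
    #   state_dict = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
    #   assert NEW_KEY in state_dict and OLD_KEY not in state_dict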
| 58 |
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the ReLU activation max(0, x) element-wise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
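    # The same call broadcasts over nested arrays too (a quick sketch):
    print(np.array(relu([[-2.0, 3.5], [0.0, -0.1]])))  # --> [[0., 3.5], [0., 0.]]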
| 303 | 0 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
lowerCamelCase__ = input('Enter a string ').strip()
lowerCamelCase__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
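    # A couple of fixed examples (the check is case-insensitive):
    #   is_isogram("Uncopyrightable")  # --> True
    #   is_isogram("Dodge")            # --> False ("d" repeats once lower-cased)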
| 322 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system Ax = b (A = matrix, b = vector) for x using
    Gaussian elimination with partial pivoting and back substitution.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [A | b]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Fit the lowest-degree polynomial through the points (1, y_list[0]),
    (2, y_list[1]), ... and return it as a callable.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum fitting polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
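    # A quick cross-check against the worked example in Project Euler 101:
    # for u(n) = n**3 the quadratic fitted through n = 1..3 first disagrees at
    # n = 4, where it produces the FIT value 58.
    quad = interpolate([1, 8, 27])
    print([quad(n) for n in (1, 2, 3, 4)])  # --> [1, 8, 27, 58]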
| 322 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
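# A hedged usage sketch: the class above mirrors transformers' CLIPImageProcessor,
# so preprocessing a PIL image looks like this (checkpoint name assumed available):
#
#   from transformers import CLIPImageProcessor
#   import numpy as np, PIL.Image
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   dummy = PIL.Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor(images=dummy, return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])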
| 158 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the HF model following the dotted key, e.g. "encoder.layers.0.conv"
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"""{name} was ignored""")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
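# Example invocation (a sketch; the script name and file paths are placeholders
# for the checkpoints downloadable from the URLs listed at the top):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf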
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
| 160 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image Feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format ready for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's native format when possible."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""")
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs: list) -> list:
    """Encode a list of objects into a format suitable for creating an extension array of type ImageExtensionType."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
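# A hedged usage sketch with the datasets library (the file path is a
# placeholder):
#
#   from datasets import Dataset, Features, Image
#   ds = Dataset.from_dict({"img": ["path/to/a.png"]}, features=Features({"img": Image()}))
#   ds[0]["img"]  # decoded lazily into a PIL.Image.Image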
| 119 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 119 | 1 |